/* drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h */
#ifndef __NVKM_VMM_H__
#define __NVKM_VMM_H__
#include "priv.h"
#include <core/memory.h>
enum nvkm_memory_target;

struct nvkm_vmm_pt {
        /* Some GPUs have a mapping level with dual page tables to
         * support large and small pages in the same address-range.
         *
         * We track the state of both page tables in one place, which
         * is why there are multiple PT pointers/refcounts here.
         */
        struct nvkm_mmu_pt *pt[2];
        u32 refs[2];

        /* Page size handled by this PT.
         *
         * The Tesla backend needs to know this when writing PDEs,
         * otherwise it's unnecessary.
         */
        u8 page;

        /* Entire page table sparse.
         *
         * Used to propagate sparseness to child page tables.
         */
        bool sparse:1;

        /* Tracking for page directories.
         *
         * The array is indexed by PDE, and will either point to the
         * child page table, or indicate the PDE is marked as sparse.
         */
#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
        struct nvkm_vmm_pt **pde;

        /* Tracking for dual page tables.
         *
         * There's one entry for each LPTE, keeping track of whether
         * there are valid SPTEs in the same address-range.
         *
         * This information is used to manage LPTE state transitions.
         */
#define NVKM_VMM_PTE_SPARSE 0x80
#define NVKM_VMM_PTE_VALID 0x40
#define NVKM_VMM_PTE_SPTES 0x3f
        u8 pte[];
};
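
/* Example (illustrative): how the tracking arrays above are interpreted.
 * The names pgd/pgt/pdei/lptei are hypothetical.
 *
 *      struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
 *      if (NVKM_VMM_PDE_INVALID(pgt)) {
 *              // No child PT pointer; NVKM_VMM_PDE_SPARSED(pgt) says whether
 *              // the PDE is marked sparse rather than simply unpopulated.
 *      } else {
 *              // pgt->pte[lptei] & NVKM_VMM_PTE_SPTES counts valid SPTEs in
 *              // that LPTE's address-range; the upper bits flag the LPTE's
 *              // own SPARSE/VALID state.
 *      }
 */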

typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
                                  struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
                                  struct nvkm_vmm_pt *, u32 pdei);
typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
                                  u32 ptei, u32 ptes, struct nvkm_vmm_map *);

struct nvkm_vmm_desc_func {
        nvkm_vmm_pxe_func invalid;
        nvkm_vmm_pxe_func unmap;
        nvkm_vmm_pxe_func sparse;

        nvkm_vmm_pde_func pde;

        nvkm_vmm_pte_func mem;
        nvkm_vmm_pte_func dma;
        nvkm_vmm_pte_func sgl;

        nvkm_vmm_pte_func pfn;
        bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
        nvkm_vmm_pxe_func pfn_unmap;
};
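
/* Example (illustrative): a backend page-table level plugs its accessors into
 * this structure; the "xx00" name is hypothetical, the gf100 helpers declared
 * below are real.
 *
 *      static const struct nvkm_vmm_desc_func xx00_vmm_pgt = {
 *              .unmap = gf100_vmm_pgt_unmap,
 *              .mem = gf100_vmm_pgt_mem,
 *              .dma = gf100_vmm_pgt_dma,
 *              .sgl = gf100_vmm_pgt_sgl,
 *      };
 */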

extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
                       struct nvkm_vmm_map *);
void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
                       struct nvkm_vmm_map *);
void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
                       struct nvkm_vmm_map *);

void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);

struct nvkm_vmm_desc {
        enum {
                PGD,
                PGT,
                SPT,
                LPT,
        } type;
        u8 bits;        /* VMA bits covered by PT. */
        u8 size;        /* Bytes-per-PTE. */
        u32 align;      /* PT address alignment. */
        const struct nvkm_vmm_desc_func *func;
};
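
/* Example (illustrative): a page-table layout is described by an array of
 * these descriptors, one per level; the values and the "xx00" names here are
 * hypothetical.
 *
 *      static const struct nvkm_vmm_desc xx00_vmm_desc_12[] = {
 *              { SPT, 10, 8, 0x1000, &xx00_vmm_pgt },
 *              { PGD, 11, 8, 0x1000, &xx00_vmm_pgd },
 *              {}
 *      };
 */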

extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];

extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];

struct nvkm_vmm_page {
        u8 shift;
        const struct nvkm_vmm_desc *desc;
#define NVKM_VMM_PAGE_SPARSE 0x01
#define NVKM_VMM_PAGE_VRAM 0x02
#define NVKM_VMM_PAGE_HOST 0x04
#define NVKM_VMM_PAGE_COMP 0x08
#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
        u8 type;
};
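
/* Example (illustrative): each entry of nvkm_vmm_func.page[] below pairs a
 * page shift with a descriptor chain and the NVKM_VMM_PAGE_* capabilities;
 * the values and names here are hypothetical.
 *
 *      .page = {
 *              { 16, &xx00_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
 *              { 12, &xx00_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
 *              {}
 *      },
 */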

struct nvkm_vmm_func {
        int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
        void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);

        int (*aper)(enum nvkm_memory_target);
        int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
                     struct nvkm_vmm_map *);
        void (*flush)(struct nvkm_vmm *, int depth);

        int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
                    u32 mthd, void *argv, u32 argc);

        void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);

        u64 page_block;
        const struct nvkm_vmm_page page[];
};

struct nvkm_vmm_join {
        struct nvkm_memory *inst;
        struct list_head head;
};

int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
                  u32 pd_header, bool managed, u64 addr, u64 size,
                  struct lock_class_key *, const char *name,
                  struct nvkm_vmm **);
int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
                  u32 pd_header, bool managed, u64 addr, u64 size,
                  struct lock_class_key *, const char *name, struct nvkm_vmm *);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
                                     u64 addr, u64 size);
int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
                        bool sparse, u8 page, u8 align, u64 size,
                        struct nvkm_vma **pvma);
void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);

#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT 12
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
#define NVKM_VMM_PFN_HOST 0x0000000000000000ULL
#define NVKM_VMM_PFN_VRAM 0x0000000000000010ULL
#define NVKM_VMM_PFN_W 0x0000000000000002ULL
#define NVKM_VMM_PFN_V 0x0000000000000001ULL
#define NVKM_VMM_PFN_NONE 0x0000000000000000ULL
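
/* Example (illustrative): an entry of the pfn array passed to
 * nvkm_vmm_pfn_map() packs the page address with aperture/permission bits,
 * roughly:
 *
 *      pfn[i] = (page_addr & NVKM_VMM_PFN_ADDR) | NVKM_VMM_PFN_VRAM |
 *               NVKM_VMM_PFN_W | NVKM_VMM_PFN_V;
 */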

int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
int nvkm_vmm_pfn_unmap(struct nvkm_vmm *, u64 addr, u64 size);

struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);

int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
                  bool, u64, u64, void *, u32, struct lock_class_key *,
                  const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);

int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void nv50_vmm_flush(struct nvkm_vmm *, int);

int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
                   struct nvkm_mmu *, bool, u64, u64, void *, u32,
                   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int gf100_vmm_aper(enum nvkm_memory_target);
int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gf100_vmm_flush(struct nvkm_vmm *, int);
void gf100_vmm_invalidate(struct nvkm_vmm *, u32 type);
void gf100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gk20a_vmm_aper(enum nvkm_memory_target);

int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
                   struct nvkm_mmu *, bool, u64, u64, void *, u32,
                   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv44_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
int mcp77_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk104_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk20a_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                        struct lock_class_key *, const char *,
                        struct nvkm_vmm **);
int gm200_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *,
                  struct nvkm_vmm **);
int gm20b_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                        struct lock_class_key *, const char *,
                        struct nvkm_vmm **);
int gm20b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *,
                  struct nvkm_vmm **);
int gp100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *,
                  struct nvkm_vmm **);
int gp10b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *,
                  struct nvkm_vmm **);
int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *,
                  struct nvkm_vmm **);
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                  struct lock_class_key *, const char *,
                  struct nvkm_vmm **);

#define VMM_PRINT(l,v,p,f,a...) do { \
        struct nvkm_vmm *_vmm = (v); \
        if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) { \
                nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n", \
                             _vmm->name, ##a); \
        } \
} while(0)
#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
#define VMM_SPAM(v,f,a...) VMM_PRINT(NV_DBG_SPAM , (v), dbg, f, ##a)
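
/* Example (illustrative): the logging helpers above take a vmm and a
 * printf-style format, e.g.
 *
 *      VMM_DEBUG(vmm, "mapped %016llx", addr);
 */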

#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do { \
        nvkm_kmap((PT)->memory); \
        while (PTEN) { \
                u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift; \
                u64 _addr = ((BASE) + MAP->off); \
                \
                if (_ptes > PTEN) { \
                        MAP->off += PTEN << MAP->page->shift; \
                        _ptes = PTEN; \
                } else { \
                        MAP->off = 0; \
                        NEXT; \
                } \
                \
                VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes); \
                \
                FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
                PTEI += _ptes; \
                PTEN -= _ptes; \
        }; \
        nvkm_done((PT)->memory); \
} while(0)

#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL) \
        VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
                     ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT), \
                     ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT), \
                     (MAP->mem = MAP->mem->next))
#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL) \
        VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
                     *MAP->dma, PAGE_SIZE, MAP->dma++)
#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL) \
        VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
                     sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl), \
                     (MAP->sgl = sg_next(MAP->sgl)))
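
/* Example (illustrative): a backend nvkm_vmm_pte_func typically pairs one of
 * the iterators above with a FILL callback that writes that GPU's PTE format;
 * the "xx00" names are hypothetical.
 *
 *      static inline void
 *      xx00_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 *                       u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
 *      {
 *              // Write 'ptes' PTEs pointing at 'addr', e.g. via VMM_WO064().
 *      }
 *
 *      void
 *      xx00_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 *                       u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
 *      {
 *              VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, xx00_vmm_pgt_pte);
 *      }
 */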

#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do { \
        const u32 _pteo = (o); u##b _data = (d); \
        VMM_SPAM((v), " %010llx "f, (m)->addr + _pteo, _data, ##a); \
        VMM_##fn((m), (m)->base + _pteo, _data, (c), b); \
} while(0)

#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 32, WO, "%08x")
#define VMM_FO032(m,v,o,d,c) \
        VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))

#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 64, WO, "%016llx")
#define VMM_FO064(m,v,o,d,c) \
        VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))

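/* Example (illustrative): backends store PTEs/PDEs through the wrappers above
 * so every write is traced at SPAM level, e.g.
 *
 *      VMM_WO064(pt, vmm, ptei * 8, data);
 */
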
#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do { \
        u32 _pteo = (o), _ptes = (c); \
        const u64 _addr = (m)->addr + _pteo; \
        VMM_SPAM((v), " %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a); \
        while (_ptes--) { \
                nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo)); \
                nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi)); \
                _pteo += 0x10; \
        } \
} while(0)

#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
#define VMM_FO128(m,v,o,lo,hi,c) do { \
        nvkm_kmap((m)->memory); \
        VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c)); \
        nvkm_done((m)->memory); \
} while(0)
#endif