/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)

/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
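/*
 * i.e. shifting the address right by 28 moves physical address bits 39:32
 * down to bits 11:4, which the 0xff0 mask then ORs into the entry.
 */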
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID REG_BIT(0)

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID REG_BIT(0)
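/*
 * Number of PTEs covered by a single PDE, e.g. 1 << (22 - 12) = 1024 for the
 * gen6 PDE shift with 4K pages.
 */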
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
#define BYT_PTE_WRITEABLE REG_BIT(1)

#define GEN12_PPGTT_PTE_LM BIT_ULL(11)

#define GEN12_GGTT_PTE_LM BIT_ULL(1)

#define GEN12_PDE_64K BIT(6)
#define GEN12_PTE_PS64 BIT(8)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					(((bits) & 0x8) << (11 - 3)))
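/*
 * e.g. HSW_CACHEABILITY_CONTROL(0xb) places 0b011 in PTE bits 3:1 and sets
 * bit 11 for the fourth cacheability bit.
 */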
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  | PDE   | PTE   | offset
 */
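/*
 * For the 48b layout each level index is 9 bits wide, i.e.
 *	pml4e = (addr >> 39) & 0x1ff
 *	pdpe  = (addr >> 30) & 0x1ff
 *	pde   = (addr >> 21) & 0x1ff
 *	pte   = (addr >> 12) & 0x1ff
 */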
#define GEN8_3LVL_PDPES 4

#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE 0 /* WB LLC */
#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP REG_BIT(6)
#define GEN8_PPAT_AGE(x) ((x)<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
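/*
 * GEN8_PPAT() packs the per-index attribute byte into a 64-bit PPAT image,
 * e.g. GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_LLC) lands in bits 23:16.
 */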

#define GEN8_PAGE_PRESENT BIT_ULL(0)
#define GEN8_PAGE_RW BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

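/* Walk the DMA addresses of @__sgt in I915_GTT_PAGE_SIZE steps. */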
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

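/*
 * __px_choose_expr() dispatches on the paging-structure type at compile time,
 * so px_base()/px_pt() below resolve a GEM object, page table or page
 * directory argument to its backing object or embedded page table.
 */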
#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If not set, this defaults back to the usual
	 * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total; /* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved; /* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vmas currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of vmas not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
	bool skip_pte_rewrite:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*scratch_range)(struct i915_address_space *vm,
			      u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				enum i915_cache_level cache_level,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   enum i915_cache_level cache_level,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap; /* Mapping to our CPU mappable region */
	struct resource gmadr; /* GMADR resource */
	resource_size_t mappable_end; /* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;

	/** List of GTs mapping this GGTT */
	struct list_head gt_list;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

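/* An address space covering more than 32 bits of VA needs a 4-level page table. */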
static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					 enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}

static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to release.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

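/* Index of the PTE within its page table for the given address. */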
static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
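/* fill32_px() replicates a 32-bit value into both halves of each 64-bit entry. */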
#define fill32_px(px, v) do {		\
	u64 v__ = lower_32_bits(v);	\
	fill_px((px), v__ << 32 | v__);	\
} while (0)

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_gt *gt);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

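/* Start an sgt_dma iterator at the first DMA segment of the vma_res backing pages. */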
static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

#endif