 * SPDX-License-Identifier: MIT
 * Copyright © 2016 Intel Corporation
#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;
struct intel_memory_region;

 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).

struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_IS_SHRINKABLE			BIT(1)
/* Skip the shrinker management in set_pages/unset_pages */
#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY			BIT(3)
#define I915_GEM_OBJECT_NO_MMAP				BIT(4)
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	int (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);
	int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
				      bool should_writeback);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);
	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
	void (*unmap_virtual)(struct drm_i915_gem_object *obj);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	 * adjust_lru - notify that the madvise value was updated
	 * @obj: The gem object
	 *
	 * The madvise value may have been updated, or the object may have
	 * been referenced recently, so act accordingly (perhaps by moving it
	 * to a different LRU list, etc.).

	void (*adjust_lru)(struct drm_i915_gem_object *obj);
	 * delayed_free - Override the default delayed free implementation
	void (*delayed_free)(struct drm_i915_gem_object *obj);

	 * migrate - Migrate object to a different region either for
	 * pinning or for as long as the object lock is held.
	int (*migrate)(struct drm_i915_gem_object *obj,
		       struct intel_memory_region *mr);

	void (*release)(struct drm_i915_gem_object *obj);

	const struct vm_operations_struct *mmap_ops;
	const char *name; /* friendly name for debug, e.g. lockdep classes */
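
/*
 * Hedged sketch (not part of this header) of how a backend might wire up the
 * get_pages()/put_pages() interface documented above. The example_* helpers
 * are hypothetical placeholders; the real implementations live in the gem
 * backend files (shmem, stolen, TTM, userptr, ...).
 */
#if 0
static int example_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	/* Hypothetical helper: allocate/map the backing store as an sg_table. */
	pages = example_alloc_backing_store(obj, &sg_page_sizes);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	return 0;
}

static void example_put_pages(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
{
	/* Hypothetical helper: release whatever example_get_pages() acquired. */
	example_release_backing_store(obj, pages);
	sg_free_table(pages);
	kfree(pages);
}

static const struct drm_i915_gem_object_ops example_ops = {
	.name = "example-object",
	.get_pages = example_get_pages,
	.put_pages = example_put_pages,
};
#endif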
 * enum i915_cache_level - The supported GTT caching values for system memory

 * These translate to some special GTT PTE bits when binding pages into some
 * address space. They also determine whether an object, or rather its pages,
 * is coherent with the GPU when reading or writing through the CPU cache.

 * Userspace can also control this through struct drm_i915_gem_caching.
enum i915_cache_level {
	 * GPU access is not coherent with the CPU cache. If the cache is dirty
	 * and we need the underlying pages to be coherent with some later GPU
	 * access then we need to manually flush the pages.
	 *
	 * On shared LLC platforms reads and writes through the CPU cache are
	 * still coherent even with this setting. See also
	 * &drm_i915_gem_object.cache_coherent for more details. Due to this we
	 * should only ever use uncached for scanout surfaces, otherwise we end
	 * up over-flushing in some places.
	 *
	 * This is the default on non-LLC platforms.
	 * GPU access is coherent with the CPU cache. If the cache is dirty,
	 * then the GPU will ensure that access remains coherent, when both
	 * reading and writing through the CPU cache. GPU writes can dirty the
	 * CPU cache.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Applies to both platforms with shared LLC (HAS_LLC), and snooping
	 * based platforms (HAS_SNOOP).
	 *
	 * This is the default on shared LLC platforms. The only exception is
	 * scanout objects, where the display engine is not coherent with the
	 * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
	 * automatically applied by the kernel in pin_for_display, if userspace
	 * has not done so already.
	 * @I915_CACHE_L3_LLC:
	 *
	 * Explicitly enable the Gfx L3 cache, with coherent LLC.
	 *
	 * The Gfx L3 sits between the domain-specific caches, e.g.
	 * sampler/render caches, and the larger LLC. LLC is coherent with the
	 * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
	 * when the workload completes.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Only exposed on some gen7 + GGTT. More recent hardware has dropped
	 * this explicit setting, where it should now be enabled by default.
	 * Write-through. Used for scanout surfaces.
	 *
	 * The GPU can utilise the caches, while still having the display engine
	 * be coherent with GPU writes; as a result we don't need to flush the
	 * CPU caches when moving out of the render domain. This is the default
	 * setting chosen by the kernel, if supported by the HW, otherwise we
	 * fall back to I915_CACHE_NONE. On the CPU side writes through the CPU
	 * cache still need to be flushed, to remain coherent with the display
	 * engine.
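
	/*
	 * Hedged illustration (not from this header) of how the levels above
	 * are typically chosen: normal objects default to I915_CACHE_LLC on
	 * LLC platforms and I915_CACHE_NONE elsewhere, while scanout objects
	 * prefer I915_CACHE_WT when the hardware supports it, falling back to
	 * I915_CACHE_NONE (compare the pin_for_display path mentioned above).
	 * The for_scanout/i915 variables are illustrative only.
	 */
#if 0
	enum i915_cache_level level;

	if (for_scanout)
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
	else
		level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
#endif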
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
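
/*
 * Hedged usage sketch (not part of this header): kernel-internal vmaps of an
 * object's pages are requested with one of the mapping types above through
 * i915_gem_object_pin_map()/i915_gem_object_pin_map_unlocked(), e.g.:
 */
#if 0
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... CPU access through vaddr ... */

	i915_gem_object_unpin_map(obj);
#endif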
enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_FIXED,

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
struct drm_i915_gem_object {
	 * We might have reason to revisit the below since it wastes
	 * a lot of space for non-ttm gem objects.
	 * In any case, always use the accessors for the ttm_buffer_object.
	struct drm_gem_object base;
	struct ttm_buffer_object __do_not_access;

	const struct drm_i915_gem_object_ops *ops;
	 * @vma.lock: protect the list/tree of vmas

	 * @vma.list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type, all GGTT VMAs are
	 * placed at the head and all ppGTT VMAs are placed at the tail.
	 * The different types of GGTT VMAs are unordered between
	 * themselves, use the @vma.tree (which has a defined order
	 * between all VMAs) to quickly find an exact match.
	struct list_head list;

	 * @vma.tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMAs created for this object are placed in the @vma.tree
	 * for fast retrieval via a binary search in
	 * i915_vma_instance(). They are also added to @vma.list for
	 * easy iteration.
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	struct list_head obj_link;

	 * @shares_resv_from: The object shares the resv from this vm.
	struct i915_address_space *shares_resv_from;
	struct llist_node freed;

	 * Whether the object is currently in the GGTT mmap.
	unsigned int userfault_count;
	struct list_head userfault_link;

	spinlock_t lock; /* Protects access to mmo offsets */
	struct rb_root offsets;

	I915_SELFTEST_DECLARE(struct list_head st_link);
#define I915_BO_ALLOC_CONTIGUOUS  BIT(0)
#define I915_BO_ALLOC_VOLATILE    BIT(1)
#define I915_BO_ALLOC_CPU_CLEAR   BIT(2)
#define I915_BO_ALLOC_USER        BIT(3)
/* Object is allowed to lose its contents on suspend / resume, even if pinned */
#define I915_BO_ALLOC_PM_VOLATILE BIT(4)
/* Object needs to be restored early using memcpy during resume */
#define I915_BO_ALLOC_PM_EARLY    BIT(5)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
			     I915_BO_ALLOC_VOLATILE | \
			     I915_BO_ALLOC_CPU_CLEAR | \
			     I915_BO_ALLOC_USER | \
			     I915_BO_ALLOC_PM_VOLATILE | \
			     I915_BO_ALLOC_PM_EARLY)
#define I915_BO_READONLY          BIT(6)
#define I915_TILING_QUIRK_BIT     7 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED         BIT(8)
#define I915_BO_WAS_BOUND_BIT     9
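
	/*
	 * Hedged usage sketch (not part of this header): allocation flags are
	 * OR'ed together and passed to an object-creation helper, e.g. the
	 * local-memory constructor. The i915/size values are illustrative.
	 */
#if 0
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_CONTIGUOUS |
					  I915_BO_ALLOC_CPU_CLEAR);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
#endif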
	 * @mem_flags - Mutable placement-related flags
	 *
	 * These are flags that indicate specifics of the memory region
	 * the object is currently in. As such they are only stable
	 * either under the object lock or if the object is pinned.
	unsigned int mem_flags;
#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM       BIT(1) /* Object backed by IO memory */
	 * @cache_level: The desired GTT caching level.
	 *
	 * See enum i915_cache_level for possible values, along with what
	 * each level means.
	unsigned int cache_level:3;
	 * Track whether the pages are coherent with the GPU if reading or
	 * writing through the CPU caches. This largely depends on the
	 * @cache_level setting.
	 *
	 * On platforms which don't have the shared LLC (HAS_SNOOP), like on
	 * Atom platforms, coherency must be explicitly requested with some
	 * special GTT caching bits (see enum i915_cache_level). When enabling
	 * coherency it does come at a performance and power cost on such
	 * platforms. On the flip side the kernel does not need to manually
	 * flush any buffers which need to be coherent with the GPU, if the
	 * object is not coherent, i.e. @cache_coherent is zero.
	 *
	 * On platforms that share the LLC with the CPU (HAS_LLC), all GT
	 * memory access will automatically snoop the CPU caches (even with
	 * CACHE_NONE). The one exception is when dealing with the display
	 * engine, like with scanout surfaces. To handle this the kernel will
	 * always flush the surface out of the CPU caches when preparing it for
	 * scanout. Also note that since scanout surfaces are only ever read by
	 * the display engine we only need to care about flushing any writes
	 * through the CPU cache, reads on the other hand will always be
	 * coherent.
	 * Something strange here is why @cache_coherent is not a simple
	 * boolean, i.e. coherent vs non-coherent. The reasoning for this goes
	 * back to the display engine not being fully coherent. As a result
	 * scanout surfaces will either be marked as I915_CACHE_NONE or
	 * I915_CACHE_WT. In the case of seeing I915_CACHE_NONE the kernel
	 * makes the assumption that this is likely a scanout surface, and
	 * will set @cache_coherent as only I915_BO_CACHE_COHERENT_FOR_READ,
	 * on platforms with the shared LLC. The kernel uses this to always
	 * flush writes through the CPU cache as early as possible, where it
	 * can, in effect keeping @cache_dirty clean, so we can potentially
	 * avoid stalling when flushing the surface just before doing the
	 * scanout. This does mean we might unnecessarily flush non-scanout
	 * objects in some places, but the default assumption is that all
	 * normal objects should be using I915_CACHE_LLC, at least on
	 * platforms with the shared LLC.
	 * I915_BO_CACHE_COHERENT_FOR_READ:
	 *
	 * On shared LLC platforms, we use this for special scanout surfaces,
	 * where the display engine is not coherent with the CPU cache. As such
	 * we need to ensure we flush any writes before doing the scanout. As
	 * an optimisation we try to flush any writes as early as possible to
	 * avoid stalling later on.
	 *
	 * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
	 * platforms, we use:
	 *
	 *	cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
	 *
	 * While for normal objects that are fully coherent, including special
	 * scanout surfaces marked as I915_CACHE_WT, we use:
	 *
	 *	cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
	 *			 I915_BO_CACHE_COHERENT_FOR_WRITE
	 *
	 * And then for objects that are not coherent at all we use:
	 *
	 *	cache_coherent = 0
	 *
	 * I915_BO_CACHE_COHERENT_FOR_WRITE:
	 *
	 * When writing through the CPU cache, the GPU is still coherent. Note
	 * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_coherent:2;
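
	/*
	 * Hedged sketch (not part of this header): the write-coherency bit
	 * above is what a flush decision boils down to; a CPU write needs a
	 * clflush before GPU access only when write coherency is not set. The
	 * helper name is illustrative, not the driver's actual function.
	 */
#if 0
	static bool example_gpu_write_needs_clflush(const struct drm_i915_gem_object *obj)
	{
		return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
	}
#endif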
	 * Track if we are dirty with writes through the CPU cache for this
	 * object. As a result reading directly from main memory might yield
	 * stale data.
	 *
	 * This also ties into whether the kernel is tracking the object as
	 * coherent with the GPU, as per @cache_coherent, as it determines if
	 * flushing might be needed at various points.
	 *
	 * Another part of @cache_dirty is managing flushing when first
	 * acquiring the pages for system memory: at this point the pages are
	 * considered foreign, so the default assumption is that the cache is
	 * dirty. For example, the page zeroing done by the kernel, or a
	 * swap-in, might leave writes sitting in the CPU cache while the
	 * actual data in main memory is potentially stale. Note that this is
	 * a potential security issue when dealing with userspace objects and
	 * zeroing. Now, whether we actually need to apply the big
	 * sledgehammer of flushing all the pages on acquire depends on
	 * whether @cache_coherent is marked as
	 * I915_BO_CACHE_COHERENT_FOR_WRITE, i.e. that the GPU will be
	 * coherent for both reads and writes through the CPU cache.
	 * Note that on shared LLC platforms we still apply the heavy flush for
	 * I915_CACHE_NONE objects, under the assumption that this is going to
	 * be used for scanout.
	 *
	 * Update: On some hardware there is now also the 'Bypass LLC' MOCS
	 * entry, which defeats our @cache_coherent tracking, since userspace
	 * can freely bypass the CPU cache when touching the pages with the
	 * GPU, where the kernel is completely unaware. On such platforms we
	 * need to apply the sledgehammer-on-acquire regardless of the
	 * @cache_coherent.
	 * Special care is taken on non-LLC platforms, to prevent potential
	 * information leaks. The driver currently ensures:
	 *
	 * 1. All userspace objects, by default, have @cache_level set as
	 * I915_CACHE_NONE. The only exception is userptr objects, where we
	 * instead force I915_CACHE_LLC, but we also don't allow userspace to
	 * ever change the @cache_level for such objects. Another special case
	 * is dma-buf, which doesn't rely on @cache_dirty, but there we
	 * always do a forced flush when acquiring the pages, if there is a
	 * chance that the pages can be read directly from main memory with
	 * the GPU.
	 *
	 * 2. All I915_CACHE_NONE objects have @cache_dirty initially true.
	 *
	 * 3. All swapped-out objects (i.e. shmem) have @cache_dirty set to
	 * true.
	 *
	 * 4. The @cache_dirty is never freely reset before the initial
	 * flush, even if userspace adjusts the @cache_level through the
	 * i915_gem_set_caching_ioctl.
	 *
	 * 5. All @cache_dirty objects (including swapped-in) are initially
	 * flushed with a synchronous call to drm_clflush_sg in
	 * __i915_gem_object_set_pages. The @cache_dirty can be freely reset
	 * at this point. All further asynchronous clflushes are never
	 * security critical, i.e. userspace is free to race against itself.
	unsigned int cache_dirty:1;
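
	/*
	 * Hedged sketch of the acquire-time flush described above (the
	 * condition paraphrases the rules, it is not copied from the driver):
	 * a dirty cache without write coherency forces a synchronous clflush
	 * of the freshly acquired pages.
	 */
#if 0
	if (obj->cache_dirty &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) {
		drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}
#endif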
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.

	 * @write_domain: Corresponding unique write memory domain.
	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
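
	/*
	 * Hedged illustration (not from this header): the tiling mode and the
	 * stride share one word and can be recovered with the masks above;
	 * compare i915_gem_object_get_tiling() and i915_gem_object_get_stride()
	 * in i915_gem_object.h. The example_* names are illustrative.
	 */
#if 0
	static unsigned int example_get_tiling(const struct drm_i915_gem_object *obj)
	{
		return obj->tiling_and_stride & TILING_MASK;
	}

	static unsigned int example_get_stride(const struct drm_i915_gem_object *obj)
	{
		return obj->tiling_and_stride & STRIDE_MASK;
	}
#endif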
	 * Protects the pages and their use. Do not use directly, but
	 * instead go through the pin/unpin interfaces.
	atomic_t pages_pin_count;
	 * @shrink_pin: Prevents the pages from being made visible to
	 * the shrinker, while the shrink_pin is non-zero. Most users
	 * should pretty much never have to care about this, outside of
	 * some special use cases.
	 *
	 * By default most objects will start out as visible to the
	 * shrinker (if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
	 * backing pages are attached to the object, like in
	 * __i915_gem_object_set_pages(). They will then be removed from
	 * the shrinker list once the pages are released.
	 *
	 * The @shrink_pin is incremented by calling
	 * i915_gem_object_make_unshrinkable(), which will also remove
	 * the object from the shrinker list, if the pin count was zero.
	 *
	 * Callers will then typically call
	 * i915_gem_object_make_shrinkable() or
	 * i915_gem_object_make_purgeable() to decrement the pin count,
	 * and make the pages visible again.
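
	/*
	 * Hedged usage sketch for the shrinker pinning described above: the
	 * make_unshrinkable/make_shrinkable helpers are the real i915
	 * functions named in the comment, the surrounding flow is only
	 * illustrative.
	 */
#if 0
	i915_gem_object_make_unshrinkable(obj);
	/* ... pages cannot be reaped by the shrinker here ... */
	i915_gem_object_make_shrinkable(obj);	/* or i915_gem_object_make_purgeable(obj) */
#endif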
	 * @ttm_shrinkable: True when the object is using shmem pages
	 * underneath. Protected by the object lock.

	 * Priority list of potential placements for this object.
	struct intel_memory_region **placements;

	 * Memory region for this object.
	struct intel_memory_region *region;

	 * Memory manager resource allocated for this object. Only
	 * needed for the mock region.
	struct ttm_resource *res;

	 * Element within memory_region->objects or region->purgeable
	 * if the object is marked as DONTNEED. Access is protected by
	struct list_head region_link;

	struct i915_refct_sgt *rsgt;
	struct sg_table *pages;
	struct i915_page_sizes {
		 * The sg mask of the pages sg_table, i.e. the mask of
		 * the lengths for each sg entry.

		 * The gtt page sizes we are allowed to use given the
		 * sg mask and the supported page sizes. This will
		 * express the smallest unit we can use for the whole
		 * object, as well as the larger sizes we may be able
		 * to use opportunistically.

		 * The actual gtt page size usage. Since we can have
		 * multiple VMAs associated with this object we need to
		 * prevent any trampling of state, hence a copy of this
		 * struct also lives in each vma, therefore the gtt
		 * value here should only be read/written through the vma.

	I915_SELFTEST_DECLARE(unsigned int page_mask);
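
	/*
	 * Hedged sketch (not part of this header): the sg "mask" described
	 * above is just the bitwise OR of every segment length, so e.g. a
	 * 2M + 4K layout yields (SZ_2M | SZ_4K). The loop paraphrases the
	 * description; the driver has its own helpers for this.
	 */
#if 0
	unsigned int sg_page_sizes = 0;
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(pages->sgl, sg, pages->nents, i)
		sg_page_sizes |= sg->length;
#endif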
	struct i915_gem_object_page_iter get_page;
	struct i915_gem_object_page_iter get_dma_page;

	 * Element within i915->mm.shrink_list or i915->mm.purge_list,
	 * locked by i915->mm.obj_lock.
	struct list_head link;

	 * Advice: are the backing pages purgeable?

	 * This is set if the object has been written to since the
	 * pages were last acquired.

	struct i915_refct_sgt *cached_io_rsgt;
	struct i915_gem_object_page_iter get_io_page;
	struct drm_i915_gem_object *backup;

	 * Record which PXP key instance this object was created against (if
	 * any), so we can use it to determine if the encryption is valid by
	 * comparing against the current key instance.
	u32 pxp_key_instance;
	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

#ifdef CONFIG_MMU_NOTIFIER
	struct i915_gem_userptr {
		unsigned long notifier_seq;

		struct mmu_interval_notifier notifier;

	struct drm_mm_node *stolen;

	unsigned long scratch;
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
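
/*
 * Hedged usage sketch: DRM core callbacks receive a struct drm_gem_object and
 * driver code converts it back with to_intel_bo(). The callback below is
 * illustrative only, not a real vfunc from this driver.
 */
#if 0
static void example_object_callback(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	/* ... operate on the driver-private object state via obj ... */
}
#endif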