/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove the entries as the object or context is closed, we need a
 * secondary list and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};

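/*
 * Hedged sketch (not from this file): during execbuf the handle is first
 * resolved through the context's radix tree, e.g.
 *
 *	rcu_read_lock();
 *	vma = radix_tree_lookup(&ctx->handles_vma, handle);
 *	rcu_read_unlock();
 *
 * while the i915_lut_handle entries on obj->lut_list let object/context
 * close walk and prune those radix trees again.
 */
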
struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
#define I915_GEM_OBJECT_NO_MMAP		BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(5)

	/*
	 * Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to binding the associated set
	 * of pages into the GTT, and put_pages() is called after we no
	 * longer need them. As we expect there to be an associated cost
	 * with migrating pages between the backing storage and making
	 * them available for the GPU (e.g. clflush), we may hold onto the
	 * pages after they are no longer referenced by the GPU in case
	 * they may be used again shortly (for example migrating the pages
	 * to a different memory domain within the GTT). put_pages() will
	 * therefore most likely be called when the object itself is being
	 * released or under memory pressure (where we attempt to reap
	 * pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);

	const char *name; /* friendly name for debug, e.g. lockdep classes */
};

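/*
 * Hedged sketch of a backend filling in this vtable; the names and flag
 * choices are illustrative only, not a real backend:
 *
 *	static const struct drm_i915_gem_object_ops my_backend_ops = {
 *		.name = "my_gem_backend",
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */
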
enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

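/*
 * A minimal usage sketch, assuming the usual helpers from
 * i915_gem_object.h:
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_unpin_map(obj);
 *
 * The FORCE variants discard a cached mapping of the other type rather
 * than reusing it.
 */
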
enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};

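/*
 * Hedged note: these types are expected to correspond to the
 * I915_MMAP_OFFSET_{GTT,WC,WB,UC} values that userspace passes to
 * DRM_IOCTL_I915_GEM_MMAP_OFFSET; each requested type gets its own
 * i915_mmap_offset node in obj->mmo.offsets.
 */
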
struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
};

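/*
 * Hedged sketch: these iterators back the random-access lookups such as
 * i915_gem_object_get_page(obj, n), caching the last scatterlist position
 * so a mostly-linear walk does not rescan the whole sg_table each time.
 */
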
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs
		 * are placed at the head and all ppGTT VMAs at the tail.
		 * The different types of GGTT VMAs are unordered between
		 * themselves; use the @vma.tree (which has a defined order
		 * between all VMAs) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMAs created for this object are placed in the
		 * @vma.tree for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list
		 * for easy iteration.
		 */
		struct rb_root tree;
	} vma;

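	/*
	 * Hedged lookup sketch: callers resolve an (obj, vm) pair through
	 * the tree rather than the list, roughly:
	 *
	 *	struct i915_vma *vma;
	 *
	 *	vma = i915_vma_instance(obj, vm, NULL);
	 *	if (IS_ERR(vma))
	 *		return PTR_ERR(vma);
	 *
	 * which binary-searches @vma.tree and creates the VMA on a miss.
	 */
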
	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are
	 * called.
	 */
	struct list_head obj_link;

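	/*
	 * A minimal ww-locking sketch, assuming the i915_gem_ww_ctx
	 * helpers (illustrative, not lifted from this file):
	 *
	 *	struct i915_gem_ww_ctx ww;
	 *	int err;
	 *
	 *	i915_gem_ww_ctx_init(&ww, true);
	 * retry:
	 *	err = i915_gem_object_lock(obj, &ww);
	 *	if (!err) {
	 *		// operate on obj while locked
	 *	}
	 *	if (err == -EDEADLK) {
	 *		err = i915_gem_ww_ctx_backoff(&ww);
	 *		if (!err)
	 *			goto retry;
	 *	}
	 *	i915_gem_ww_ctx_fini(&ww);
	 */
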
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE   BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY         BIT(2)
#define I915_TILING_QUIRK_BIT    3 /* unknown swizzling; do not release! */

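	/*
	 * Hedged allocation sketch (assuming the LMEM helper exists in
	 * this tree):
	 *
	 *	obj = i915_gem_object_create_lmem(i915, size,
	 *					  I915_BO_ALLOC_CONTIGUOUS);
	 *	if (IS_ERR(obj))
	 *		return PTR_ERR(obj);
	 *
	 * The I915_BO_ALLOC_* bits are allocation-time only, while
	 * I915_BO_READONLY and the tiling quirk bit track runtime state.
	 */
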
	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant PTE bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed
	 * and invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

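	/*
	 * Hedged decode sketch: the low bits hold the tiling mode and the
	 * rest the stride, i.e.
	 *
	 *	unsigned int tiling = obj->tiling_and_stride & TILING_MASK;
	 *	unsigned int stride = obj->tiling_and_stride & STRIDE_MASK;
	 */
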
	struct {
		/*
		 * Protects the pages and their use. Do not use directly,
		 * but instead go through the pin/unpin interfaces.
		 */
		struct mutex lock;
		atomic_t pages_pin_count;
		atomic_t shrink_pin;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;
		/**
		 * List of memory region blocks allocated for this object.
		 */
		struct list_head blocks;
		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected
		 * by region->obj_lock.
		 */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need
			 * to prevent any trampling of state, hence a copy of
			 * this struct also lives in each vma, therefore the
			 * gtt value here should only be read/written through
			 * the vma.
			 */
			unsigned int gtt;
		} page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;
	} mm;

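	/*
	 * Hedged pin sketch: mm.lock and mm.pages_pin_count are not used
	 * directly; callers keep the backing store resident through the
	 * pin/unpin helpers, roughly:
	 *
	 *	err = i915_gem_object_pin_pages(obj);
	 *	if (err)
	 *		return err;
	 *	// mm.pages is now safe to dereference
	 *	i915_gem_object_unpin_pages(obj);
	 */
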
	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		struct drm_mm_node *stolen;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
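
/*
 * Hedged usage note: to_intel_bo() is the downcast used wherever core DRM
 * hands back a struct drm_gem_object, e.g. (illustrative):
 *
 *	struct drm_gem_object *gem = drm_gem_object_lookup(file, handle);
 *	struct drm_i915_gem_object *obj = gem ? to_intel_bo(gem) : NULL;
 */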

#endif