/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>

#include <drm/drm_mm.h>

#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_request.h"

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before the object is bound into the
 * address space, or after it is unbound.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
        struct drm_mm_node node;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
        struct drm_i915_fence_reg *fence;
        struct sg_table *pages;
        void __iomem *iomap;
        u64 size;
        u64 display_alignment;

        unsigned int flags;
        /**
         * How many users have pinned this object in GTT space. The following
         * users can each hold at most one reference: pwrite/pread, execbuffer
         * (objects are not allowed multiple times for the same batchbuffer),
         * and the framebuffer code. When switching/pageflipping, the
         * framebuffer code has at most two buffers pinned per crtc.
         *
         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
         * bits with absolutely no headroom. So use 4 bits.
         */
#define I915_VMA_PIN_MASK 0xf
#define I915_VMA_PIN_OVERFLOW   BIT(5)

        /** Flags and address space this VMA is bound to */
#define I915_VMA_GLOBAL_BIND    BIT(6)
#define I915_VMA_LOCAL_BIND     BIT(7)
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)

#define I915_VMA_GGTT           BIT(8)
#define I915_VMA_CAN_FENCE      BIT(9)
#define I915_VMA_CLOSED         BIT(10)

        unsigned int active;
        struct i915_gem_active last_read[I915_NUM_ENGINES];
        struct i915_gem_active last_write;
        struct i915_gem_active last_fence;

        /**
         * Support different GGTT views into the same object.
         * This means there can be multiple VMA mappings per object and per VM.
         * i915_ggtt_view_type is used to distinguish between those entries.
         * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
         * functions which take no ggtt view parameter.
         */
        struct i915_ggtt_view ggtt_view;

        /** This object's place on the active/inactive lists */
        struct list_head vm_link;

        struct list_head obj_link; /* Link in the object's VMA list */
        struct rb_node obj_node;

        /** This vma's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;

        /**
         * Used for performing relocations during execbuffer insertion.
         */
        struct hlist_node exec_node;
        unsigned long exec_handle;
        struct drm_i915_gem_exec_object2 *exec_entry;
};

struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
                struct i915_address_space *vm,
                const struct i915_ggtt_view *view);

void i915_vma_unpin_and_release(struct i915_vma **p_vma);

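/*
 * Usage sketch (illustrative only, not lifted from the driver): creating a
 * VMA for the default view of an object. A NULL view selects
 * I915_GGTT_VIEW_NORMAL. Per the lifetime rule above, no separate VMA
 * reference is taken; the object's refcount covers the VMA. "obj" and "vm"
 * are assumed to exist in the caller's context.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_create(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */
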
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
        return vma->flags & I915_VMA_GGTT;
}

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
        return vma->flags & I915_VMA_CAN_FENCE;
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
        return vma->flags & I915_VMA_CLOSED;
}

static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
        return vma->active;
}

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
        return i915_vma_get_active(vma);
}

static inline void i915_vma_set_active(struct i915_vma *vma,
                                       unsigned int engine)
{
        vma->active |= BIT(engine);
}

static inline void i915_vma_clear_active(struct i915_vma *vma,
                                         unsigned int engine)
{
        vma->active &= ~BIT(engine);
}

static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
                                              unsigned int engine)
{
        return vma->active & BIT(engine);
}

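/*
 * Sketch (illustrative, simplified from a retire-style flow): the active
 * mask holds one bit per engine with outstanding reads. Bits are cleared as
 * requests complete, and once no engine holds a bit the vma can migrate to
 * the address space's inactive list; the list name here is assumed from the
 * wider driver.
 *
 *	i915_vma_clear_active(vma, engine_id);
 *	if (!i915_vma_is_active(vma))
 *		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 */
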
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->node.allocated);
        GEM_BUG_ON(upper_32_bits(vma->node.start));
        GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
        return lower_32_bits(vma->node.start);
}

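/*
 * Illustrative use of i915_ggtt_offset(): hardware registers that take a
 * GGTT address are only 32 bits wide, hence the upper_32_bits() asserts
 * above. The register name and I915_WRITE() context are assumed from the
 * wider driver.
 *
 *	I915_WRITE(DSPSURF(plane), i915_ggtt_offset(vma));
 */
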
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
        i915_gem_object_get(vma->obj);
        return vma;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
        i915_gem_object_put(vma->obj);
}

static inline long
i915_vma_compare(struct i915_vma *vma,
                 struct i915_address_space *vm,
                 const struct i915_ggtt_view *view)
{
        GEM_BUG_ON(view && !i915_vma_is_ggtt(vma));

        if (vma->vm != vm)
                return vma->vm - vm;

        if (!view)
                return vma->ggtt_view.type;

        if (vma->ggtt_view.type != view->type)
                return vma->ggtt_view.type - view->type;

        return memcmp(&vma->ggtt_view.params,
                      &view->params,
                      sizeof(view->params));
}

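/*
 * Sketch (illustrative): i915_vma_compare() is shaped for ordered lookups,
 * returning zero on a match and a signed value giving the search direction
 * otherwise. A walk over the rbtree that the obj_node entries link into
 * (assumed here to be obj->vma_tree) might look like:
 *
 *	struct rb_node *rb = obj->vma_tree.rb_node;
 *
 *	while (rb) {
 *		struct i915_vma *vma =
 *			rb_entry(rb, struct i915_vma, obj_node);
 *		long cmp = i915_vma_compare(vma, vm, view);
 *
 *		if (cmp == 0)
 *			return vma;
 *		rb = cmp < 0 ? rb->rb_right : rb->rb_left;
 *	}
 *	return NULL;
 */
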
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags);
static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
        BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
        BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

        /* Pin early to prevent the shrinker/eviction logic from destroying
         * our vma as we insert and bind.
         */
        if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
                return 0;

        return __i915_vma_do_pin(vma, size, alignment, flags);
}

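/*
 * Typical pin/use/unpin pattern (illustrative sketch): the PIN_* flags
 * mirror the I915_VMA_*_BIND bits, as the BUILD_BUG_ONs above enforce,
 * which is what lets the fast path test "already bound as requested" with a
 * single flags comparison while the increment holds off the shrinker.
 *
 *	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (ret)
 *		return ret;
 *
 *	... use the binding, e.g. via i915_ggtt_offset(vma) ...
 *
 *	i915_vma_unpin(vma);
 */
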
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
        return vma->flags & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
        return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
        vma->flags++;
        GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
        vma->flags--;
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        __i915_vma_unpin(vma);
}

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed-in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the returned iomapping;
 * the caller must call i915_vma_unpin_iomap() to relinquish it once the
 * iomapping is no longer required.
 *
 * Callers must hold the struct_mutex.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))

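/*
 * Iomap contract sketch (illustrative): pin into the mappable GGTT first,
 * map, write through the aperture, then drop the iomap pin and the original
 * pin. PIN_MAPPABLE requests an aperture-visible binding; "value" and
 * "offset" are assumed from the caller.
 *
 *	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr)) {
 *		i915_vma_unpin(vma);
 *		return PTR_ERR(ptr);
 *	}
 *
 *	writel(value, ptr + offset);
 *
 *	i915_vma_unpin_iomap(vma);
 *	i915_vma_unpin(vma);
 */
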
/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the VMA previously iomapped with i915_vma_pin_iomap().
 *
 * Callers must hold the struct_mutex. This function is only valid to be
 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
 */
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->dev->struct_mutex);
        GEM_BUG_ON(vma->iomap == NULL);
        i915_vma_unpin(vma);
}

static inline struct page *i915_vma_first_page(struct i915_vma *vma)
{
        GEM_BUG_ON(!vma->pages);
        return sg_page(vma->pages->sgl);
}

/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * status must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * True if the vma has a fence, false otherwise.
 */
static inline bool
i915_vma_pin_fence(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->dev->struct_mutex);
        if (vma->fence) {
                vma->fence->pin_count++;
                return true;
        }

        return false;
}

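/*
 * Scanout fencing sketch (illustrative): synchronize the fence state with
 * i915_vma_get_fence() before taking the pin; both helpers cope with
 * untiled objects that have no fence attached.
 *
 *	ret = i915_vma_get_fence(vma);
 *	if (ret)
 *		return ret;
 *
 *	if (i915_vma_pin_fence(vma))
 *		... scan out through the fenced mapping ...
 *
 *	... when done ...
 *	i915_vma_unpin_fence(vma);
 */
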
/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence. It will handle both objects with and without an
 * attached fence correctly; callers do not need to distinguish the two.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->dev->struct_mutex);
        if (vma->fence) {
                GEM_BUG_ON(vma->fence->pin_count <= 0);
                vma->fence->pin_count--;
        }
}

#endif /* __I915_VMA_H__ */