/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
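	/*
	 * Worked example of the loop below: bit i of page_sizes.sg is set
	 * whenever page_sizes.phys has any bit at or above position i. With
	 * supported = 1G | 2M | 64K | 4K and a phys mix of 2M and 64K chunks,
	 * sg becomes 2M | 64K | 4K (every supported size no larger than the
	 * largest physical chunk), but not 1G.
	 */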
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

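/*
 * Low-level helper that asks the backend (obj->ops->get_pages) to populate
 * the object; both callers in this file hold obj->mm.lock around it.
 * Purgeable objects (madv != I915_MADV_WILLNEED) are refused outright.
 */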
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

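/*
 * Forget every scatterlist entry cached in the get_page radix tree so that
 * the next i915_gem_object_get_sg() lookup starts from scratch; called when
 * the backing pages are taken away in __i915_gem_object_unset_pages().
 */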
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

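/*
 * Detach the sg_table from the object: take the object off the shrinker
 * lists, tear down any kernel mapping and the page-iterator cache, and hand
 * the pages back to the caller so the backend can release them.
 */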
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_shrinkable(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		list_del(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;

		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	if (obj->mm.mapping) {
		void *ptr;

		ptr = page_mask_bits(obj->mm.mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);

	return addr;
}

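/*
 * Typical use of the mapping API, sketched for illustration only (error
 * handling elided): pin and map the object, write through the returned
 * kernel address, flush if needed, then drop the map reference again.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	memcpy(vaddr, data, len);
 *	__i915_gem_object_flush_map(obj, 0, len);
 *	i915_gem_object_unpin_map(obj);
 */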
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (unlikely(!i915_gem_object_has_struct_page(obj)))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

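/*
 * Flush the CPU caches for the [offset, offset + size) range of a pinned
 * kernel mapping after the CPU has written through it. The flush is skipped
 * when the object is already cache coherent for CPU writes or when the
 * mapping is write-combined.
 */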
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

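/*
 * Look up the scatterlist chunk containing page index n of the object and
 * return it, with *offset set to the page offset within that chunk. The
 * object's pages must already be pinned.
 */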
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
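	/*
	 * For example, a three-page sg chunk starting at page index 4 is
	 * cached as radix[4] = sg, radix[5] = xa_mk_value(4) and
	 * radix[6] = xa_mk_value(4); the value entries point back at the
	 * index of the chunk's first page.
	 */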
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

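/*
 * Return the struct page backing page index n; only valid for objects with
 * struct-page backing and with their pages pinned.
 */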
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

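/*
 * Return the DMA address of page index n and, if len is non-NULL, the number
 * of bytes remaining in that DMA segment from this page onwards.
 */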
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}