/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"
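
/*
 * "Phys" objects back a GEM object with one contiguous block of
 * DMA-coherent memory instead of the usual list of shmem pages. They
 * serve legacy hardware paths (e.g. the physical cursor on older
 * platforms) that require a single physically contiguous allocation.
 */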

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

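	/*
	 * Bit17-swizzled objects would need per-page swizzle handling on
	 * copy, which this contiguous path does not implement.
	 */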
	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
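	/* e.g. a 24KiB object gets a 32KiB buffer, itself 32KiB aligned */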
	vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

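	/* One sg entry suffices: the backing store is physically contiguous. */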
	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	/* Stash the kernel vaddr in place of the page pointer for put_pages */
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

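	/*
	 * Snapshot the current shmem contents into the contiguous buffer,
	 * flushing each page out of the CPU caches as we go.
	 */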
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_st;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

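	/* Unwind partial construction: the sg_table, then the DMA buffer. */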
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	/* The "page" stashed in the sg entry is really the kernel vaddr */
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

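	/*
	 * If the object was written through the phys mapping, copy the
	 * contents back into the shmem pages so nothing is lost when the
	 * backing store is released.
	 */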
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

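/* Drop the reference on the shmem file kept for the object's lifetime. */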
static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.name = "i915_gem_object_phys",
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};

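/*
 * Convert a shmem-backed object into a phys object: its contents are
 * migrated into a single contiguous DMA allocation. The object must be
 * shmem-backed and not pinned, mapped or marked for eviction.
 */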
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

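	/* Refuse discardable, quirk-pinned or currently mapped objects. */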
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

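	/* With the ops swapped, this invokes the phys get_pages above. */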
	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

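	/* The contents have been copied; the original shmem pages can go. */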
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);

	i915_gem_object_release_memory_region(obj);

	mutex_unlock(&obj->mm.lock);
	return 0;

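	/* On failure, restore the shmem ops and re-attach the original pages. */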
err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif