/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

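/*
 * Replace the object's shmem backing with a single physically contiguous,
 * DMA-coherent allocation, copying over any existing contents. The buffer
 * is exposed as a one-entry sg_table so the rest of the code can treat it
 * like any other page set.
 */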
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	/* Contiguous chunk, with a single scatterlist element */
	if (overflows_type(obj->base.size, sg->length))
		return -E2BIG;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

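	/*
	 * There is no struct page backing a coherent allocation, so stash
	 * the kernel address in the page slot; put_pages_phys() retrieves
	 * it again via sg_page().
	 */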
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

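	/*
	 * Copy the shmem contents page by page into the contiguous buffer,
	 * flushing CPU caches as we go so the GPU reads coherent data.
	 */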
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_sg;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st);

	return 0;

err_sg:
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

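/*
 * Release the contiguous backing store, first writing any dirty contents
 * back to the original shmem pages so nothing is lost across the switch.
 */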
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

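	/*
	 * If the object was written to while contiguously backed, copy
	 * each page back out to shmem, flushing CPU caches first so the
	 * copy sees the most recent data.
	 */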
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

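/*
 * pwrite backend for phys objects: copy user data straight into the
 * contiguous kernel mapping, then flush caches so the GPU sees it.
 */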
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

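/*
 * pread backend for phys objects: flush caches, then copy from the
 * contiguous kernel mapping out to userspace.
 */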
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

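/*
 * Swap the object's backing store from shmem pages to the contiguous
 * physical allocation, restoring the old pages on failure.
 */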
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages))
		__i915_gem_object_set_pages(obj, pages);
	return err;
}

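/*
 * Convert a shmem object to a contiguous physical object. The caller
 * must hold the object lock, and the object must be idle, unpinned,
 * unmapped and not purgeable.
 *
 * A minimal caller sketch (illustrative only, not taken from this file):
 *
 *	i915_gem_object_lock(obj, NULL);
 *	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
 *	i915_gem_object_unlock(obj);
 */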
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif