drm/i915: mark dmabuf objects as ALLOC_USER
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

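/*
 * Selftest hook: when force_different_devices is set, an i915-exported
 * dma-buf is treated as if it came from a foreign device, forcing the
 * full attach/import path in i915_gem_prime_import() below.
 */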
I915_SELFTEST_DECLARE(static bool force_different_devices;)

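/* Recover the GEM object backing a dma-buf that i915 itself exported. */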
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

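/*
 * Map the exported object into an importer's DMA address space. The
 * object's sg_table is copied so that each attachment gets an independent
 * mapping; CPU cache maintenance is skipped here (DMA_ATTR_SKIP_CPU_SYNC)
 * and is instead driven through the begin/end_cpu_access hooks below.
 */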
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err:
	return ERR_PTR(ret);
}

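/* Undo i915_gem_map_dma_buf(): unmap and free the per-attachment sg_table. */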
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);
}

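/*
 * Give the importer a contiguous kernel mapping of the object by pinning
 * its pages into a write-back (I915_MAP_WB) vmap.
 */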
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	dma_buf_map_set_vaddr(map, vaddr);

	return 0;
}

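/* Flush any CPU writes through the mapping, then drop the vmap pin. */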
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

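/*
 * mmap of the dma-buf is forwarded to the shmem file backing the object;
 * objects without a backing filp cannot be mapped this way.
 */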
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	vma_set_file(vma, obj->base.filp);

	return 0;
}

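/*
 * Move the object into the CPU domain before the importer touches it from
 * the CPU, retrying via the ww-mutex backoff dance on -EDEADLK.
 */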
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_cpu_domain(obj, write);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

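/*
 * At the end of the importer's CPU access, flush the object back to a
 * coherent (GTT) domain, using the same ww backoff dance.
 */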
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_gtt_domain(obj, false);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

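/*
 * An attachment can only be served out of system memory, so migrate the
 * object to SMEM (refusing objects that cannot live there) and keep its
 * pages pinned for the lifetime of the attachment.
 */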
static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
	struct i915_gem_ww_ctx ww;
	int err;

	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
		return -EOPNOTSUPP;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, 0);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
	}

	return err;
}

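/* Drop the page pin taken in i915_gem_dmabuf_attach(). */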
static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);

	i915_gem_object_unpin_pages(obj);
}

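/* dma-buf callbacks for every buffer exported by i915. */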
static const struct dma_buf_ops i915_dmabuf_ops = {
	.attach = i915_gem_dmabuf_attach,
	.detach = i915_gem_dmabuf_detach,
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

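/*
 * Export a GEM object as a dma-buf. The object's own reservation object
 * is shared (exp_info.resv) so fences stay coherent across the export
 * boundary, and a backend may veto or prepare the export through the
 * optional dmabuf_export hook.
 */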
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

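/*
 * get_pages for an imported object: borrow the exporter's sg_table via
 * dma_buf_map_attachment() and adopt it as the object's backing store.
 */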
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	assert_object_held(obj);

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_dma_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

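/* Hand the borrowed sg_table back to the exporter. */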
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

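/* Object ops for a GEM object wrapping a foreign dma-buf. */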
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

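/*
 * Import a dma-buf as a GEM object. A buffer exported by i915 on the same
 * device is simply unwrapped back to its original object; anything else
 * gets a proxy object whose pages come from the attachment. The proxy is
 * created with I915_BO_ALLOC_USER, per this commit, so the rest of the
 * driver treats it as a userspace-visible allocation.
 */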
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev &&
		    !I915_SELFTEST_ONLY(force_different_devices)) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	if (i915_gem_object_size_2big(dma_buf->size))
		return ERR_PTR(-E2BIG);

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif