drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"

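/*
 * A "sleeve" wraps a borrowed sg_table in a proxy vma, so the blitter can
 * operate on pages that are not the target object's own backing store.
 */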
struct i915_sleeve {
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
};

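/*
 * Proxy vma ops: substitute the sleeve's borrowed pages for the object's
 * backing store, while forwarding bind/unbind to the address space's
 * normal vma operations.
 */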
static int vma_set_pages(struct i915_vma *vma)
{
	struct i915_sleeve *sleeve = vma->private;

	vma->pages = sleeve->pages;
	vma->page_sizes = sleeve->page_sizes;

	return 0;
}

static void vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	vma->pages = NULL;
}

static void vma_bind(struct i915_address_space *vm,
		     struct i915_vm_pt_stash *stash,
		     struct i915_vma *vma,
		     enum i915_cache_level cache_level,
		     u32 flags)
{
	vm->vma_ops.bind_vma(vm, stash, vma, cache_level, flags);
}

static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->vma_ops.unbind_vma(vm, vma);
}

static const struct i915_vma_ops proxy_vma_ops = {
	.set_pages = vma_set_pages,
	.clear_pages = vma_clear_pages,
	.bind_vma = vma_bind,
	.unbind_vma = vma_unbind,
};

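/*
 * Instantiate a vma for the object in the given address space and reroute
 * its page handling through proxy_vma_ops, so the vma is backed by the
 * caller's sg_table rather than the object's own pages.
 */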
static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
					 struct drm_i915_gem_object *obj,
					 struct sg_table *pages,
					 struct i915_page_sizes *page_sizes)
{
	struct i915_sleeve *sleeve;
	struct i915_vma *vma;
	int err;

	sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
	if (!sleeve)
		return ERR_PTR(-ENOMEM);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_free;
	}

	vma->private = sleeve;
	vma->ops = &proxy_vma_ops;

	sleeve->vma = vma;
	sleeve->pages = pages;
	sleeve->page_sizes = *page_sizes;

	return sleeve;

err_free:
	kfree(sleeve);
	return ERR_PTR(err);
}

static void destroy_sleeve(struct i915_sleeve *sleeve)
{
	kfree(sleeve);
}

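/*
 * A clear_pages_work ties together the fence exported to the object's
 * reservation (dma), the sw fence that gates the worker on prior users of
 * the pages (wait), the worker that emits the fill blt (work), and the
 * irq worker that finally signals dma (irq_work).
 */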
struct clear_pages_work {
	struct dma_fence dma;
	struct dma_fence_cb cb;
	struct i915_sw_fence wait;
	struct work_struct work;
	struct irq_work irq_work;
	struct i915_sleeve *sleeve;
	struct intel_context *ce;
	u32 value;
};

static const char *clear_pages_work_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
{
	return "clear";
}

static void clear_pages_work_release(struct dma_fence *fence)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), dma);

	destroy_sleeve(w->sleeve);

	i915_sw_fence_fini(&w->wait);

	BUILD_BUG_ON(offsetof(typeof(*w), dma));
	dma_fence_free(&w->dma);
}

static const struct dma_fence_ops clear_pages_work_ops = {
	.get_driver_name = clear_pages_work_driver_name,
	.get_timeline_name = clear_pages_work_timeline_name,
	.release = clear_pages_work_release,
};

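/*
 * Fence signalling is bounced through irq_work: the request callback below
 * runs with the request's fence lock held, so signalling w->dma directly
 * from there would nest fence spinlocks.
 */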
static void clear_pages_signal_irq_worker(struct irq_work *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);

	dma_fence_signal(&w->dma);
	dma_fence_put(&w->dma);
}

static void clear_pages_dma_fence_cb(struct dma_fence *fence,
				     struct dma_fence_cb *cb)
{
	struct clear_pages_work *w = container_of(cb, typeof(*w), cb);

	if (fence->error)
		dma_fence_set_error(&w->dma, fence->error);

	/*
	 * Push the signalling of the fence into yet another worker to avoid
	 * the nightmare locking around the fence spinlock.
	 */
	irq_work_queue(&w->irq_work);
}

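/*
 * The worker runs once the sw fence has fired: flush CPU caches if dirty,
 * pin the context under a ww transaction, emit the fill blt, and wire the
 * request's completion up to w->dma.
 */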
static void clear_pages_worker(struct work_struct *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), work);
	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	struct i915_vma *batch;
	int err = w->dma.error;

	/*
	 * An error set before we were scheduled means the vma pin failed and
	 * no engine wakeref or ww context was ever taken: skip the unwind
	 * and only propagate the error to the fence.
	 */
	if (unlikely(err))
		goto out_error;

	if (obj->cache_dirty) {
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(w->sleeve->pages);
		obj->cache_dirty = false;
	}
	obj->read_domains = I915_GEM_GPU_DOMAINS;
	obj->write_domain = 0;

	i915_gem_ww_ctx_init(&ww, false);
	intel_engine_pm_get(w->ce->engine);
retry:
	err = intel_context_pin_ww(w->ce, &ww);
	if (err)
		goto out_signal;

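	/*
	 * The returned batch holds the blitter commands for the fill. It is
	 * built under the same ww context, so a -EDEADLK anywhere in this
	 * sequence unwinds to out_signal, backs off and retries.
	 */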
	batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_ctx;
	}

	rq = i915_request_create(w->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_batch;
	}

	/* There's no way the fence has signalled */
	if (dma_fence_add_callback(&rq->fence, &w->cb,
				   clear_pages_dma_fence_cb))
		GEM_BUG_ON(1);

	err = intel_emit_vma_mark_active(batch, rq);
	if (unlikely(err))
		goto out_request;

	/*
	 * w->dma is already exported via (vma|obj)->resv; we need only
	 * keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	err = __i915_vma_move_to_active(vma, rq);
	if (err)
		goto out_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			goto out_request;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
out_request:
	if (unlikely(err)) {
		i915_request_set_error_once(rq, err);
		err = 0;
	}

	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(w->ce, batch);
out_ctx:
	intel_context_unpin(w->ce);
out_signal:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	i915_vma_unpin(w->sleeve->vma);
	intel_engine_pm_put(w->ce->engine);
out_error:
	if (unlikely(err)) {
		dma_fence_set_error(&w->dma, err);
		dma_fence_signal(&w->dma);
		dma_fence_put(&w->dma);
	}
}

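/*
 * Pin the vma and publish w->dma on the object's reservation: the sw fence
 * is set to wait on every fence currently in the resv, and w->dma is then
 * installed as the new exclusive fence.
 */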
static int pin_wait_clear_pages_work(struct clear_pages_work *w,
				     struct intel_context *ce)
{
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err)
		goto out;

	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out;

	err = i915_sw_fence_await_reservation(&w->wait,
					      vma->obj->base.resv, NULL,
					      true, 0, I915_FENCE_GFP);
	if (err)
		goto err_unpin_vma;

	dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);

err_unpin_vma:
	if (err)
		i915_vma_unpin(vma);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

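/*
 * sw fence callback: on FENCE_COMPLETE all awaited fences have signalled,
 * so kick the worker; on FENCE_FREE drop the fence reference taken when
 * the work was committed.
 */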
static int __i915_sw_fence_call
clear_pages_work_notify(struct i915_sw_fence *fence,
			enum i915_sw_fence_notify state)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&w->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&w->dma);
		break;
	}

	return NOTIFY_DONE;
}

static DEFINE_SPINLOCK(fence_lock);

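/*
 * Schedule an asynchronous fill of the given pages with a 32-bit value on
 * the blitter. A negative return reports a setup failure (the fence is
 * still committed, with the error attached); on 0 the clear completes
 * asynchronously and can be tracked via the object's reservation.
 */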
/* XXX: better name please */
int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
				     struct intel_context *ce,
				     struct sg_table *pages,
				     struct i915_page_sizes *page_sizes,
				     u32 value)
{
	struct clear_pages_work *work;
	struct i915_sleeve *sleeve;
	int err;

	sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
	if (IS_ERR(sleeve))
		return PTR_ERR(sleeve);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		destroy_sleeve(sleeve);
		return -ENOMEM;
	}

	work->value = value;
	work->sleeve = sleeve;
	work->ce = ce;

	INIT_WORK(&work->work, clear_pages_worker);

	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);

	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
	i915_sw_fence_init(&work->wait, clear_pages_work_notify);

	err = pin_wait_clear_pages_work(work, ce);
	if (err < 0)
		dma_fence_set_error(&work->dma, err);

	dma_fence_get(&work->dma);
	i915_sw_fence_commit(&work->wait);

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_client_blt.c"
#endif