drm/i915: Mark up all locked waiters
linux-2.6-block.git: drivers/gpu/drm/i915/i915_gem_request.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
	 * a single timeline. Note that each timeline will have
	 * multiple execution contexts (fence contexts) as we allow
	 * engines within a single timeline to execute in parallel.
	 */
	return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

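/* ->enable_signaling() hook for the fence API: report false if the request
 * has already completed, otherwise arm the engine's breadcrumb signaling so
 * that the fence is signaled once the seqno lands.
 */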
static bool i915_fence_enable_signaling(struct fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

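/* The fence API passes its timeout in jiffies, whereas i915_wait_request()
 * takes an optional timeout in nanoseconds, so convert on the way in and
 * convert whatever time remains back to jiffies on the way out.
 */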
static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = i915_wait_request(to_request(fence),
				interruptible, timeout,
				NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}

static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
					  int size)
{
	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
}

static void i915_fence_release(struct fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
	.fence_value_str = i915_fence_value_str,
	.timeline_value_str = i915_fence_timeline_value_str,
};

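/* Associate the request with the file (client) that submitted it, adding it
 * to the per-client request list under file_priv->mm.lock so that the
 * client's outstanding work can be tracked and removed again on retire.
 */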
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	trace_i915_gem_request_retire(request);
	list_del(&request->link);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}

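/* Retire, in submission order, every request on @req's engine up to and
 * including @req itself. Requires struct_mutex; the caller is expected to
 * know that @req has already completed.
 */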
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(list_empty(&req->link));

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

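/* Sample the GPU error state before submitting new work: -EIO if the device
 * is terminally wedged, -EAGAIN if a reset is in progress (or -EIO again if
 * the caller cannot cope with being interrupted).
 */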
static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_terminally_wedged(error))
		return -EIO;

	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!dev_priv->mm.interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine,
					I915_WAIT_INTERRUPTIBLE |
					I915_WAIT_LOCKED);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}

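/* Force the seqno that will be assigned to the next request. The GPU is
 * idled and all outstanding requests retired via i915_gem_init_seqno()
 * first, so the new value can be installed without any work in flight.
 */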
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	dev_priv->next_seqno = seqno;
	return 0;
}

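/* Hand out the next global seqno. Zero is reserved as the "no seqno" marker,
 * so on wraparound we idle the GPU and reset the breadcrumbs via
 * i915_gem_init_seqno() before restarting from 1.
 */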
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (unlikely(dev_priv->next_seqno == 0)) {
		int ret;

		ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->next_seqno++;
	return 0;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 * This can be NULL if the request is not directly related to
 * any specific user context, in which case this function will
 * choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	u32 seqno;
	int ret;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->request_list,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->fence.seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with fence_init(). This increment is safe for release as we check
	 * that the request we have a reference to still matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and so we restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_get_seqno(dev_priv, &seqno);
	if (ret)
		goto err;

	spin_lock_init(&req->lock);
	fence_init(&req->fence,
		   &i915_fence_ops,
		   &req->lock,
		   engine->fence_context,
		   seqno);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->previous_context = NULL;
	req->file_priv = NULL;
	req->batch = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ERR_PTR(ret);
}

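/* Mark the device as busy on behalf of this engine: the first request after
 * idling takes a runtime-pm wakeref, re-enables GT powersave/RPS and
 * schedules the retire worker.
 */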
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = ring->tail;
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		ret = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	engine->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&engine->last_request, request);
	list_add_tail(&request->link, &engine->request_list);
	list_add_tail(&request->ring_link, &ring->request_list);

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	request->postfix = ring->tail;

	/* Not allowed to fail! */
	ret = engine->emit_request(request);
	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

	/* Sanity check that the reserved size was large enough. */
	ret = ring->tail - request_start;
	if (ret < 0)
		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	i915_gem_mark_busy(engine);
	engine->submit_request(request);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
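	/* A shift by 10 divides by 1024, close enough to the divide by 1000
	 * needed for an exact ns->us conversion given the tolerances above.
	 */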
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

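/* Stop busywaiting either when the timeout expires or when we have been moved
 * to a different CPU, as cross-CPU local_clock() comparisons are meaningless
 * and the migration itself is taken as a sign of system load.
 */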
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed-in seqno and reset_counter
 * values have been read by the caller in an SMP-safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request completed within the allotted time. Otherwise
 * returns an errno, with the remaining time filled into the timeout argument.
 */
int i915_wait_request(struct drm_i915_gem_request *req,
		      unsigned int flags,
		      s64 *timeout,
		      struct intel_rps_client *rps)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;
	unsigned long timeout_remain;
	int ret = 0;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif

	if (i915_gem_request_completed(req))
		return 0;

	timeout_remain = MAX_SCHEDULE_TIMEOUT;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		/* Record current time in case interrupted, or wedged */
		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
		*timeout += ktime_get_raw_ns();
	}

	trace_i915_gem_request_wait_begin(req);

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->fence.seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout_remain = io_schedule_timeout(timeout_remain);
		if (timeout_remain == 0) {
			ret = -ETIME;
			break;
		}

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}

	intel_engine_remove_wait(req->engine, &wait);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
	__set_current_state(TASK_RUNNING);

complete:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		*timeout -= ktime_get_raw_ns();
		if (*timeout < 0)
			*timeout = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	if (IS_RPS_USER(rps) &&
	    req->fence.seqno == req->engine->last_submitted_seqno) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&req->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&req->i915->rps.client_lock);
	}

	return ret;
}

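/* Retire completed requests on this engine in submission order, stopping at
 * the first request that is still busy. Returns true if every request was
 * retired, i.e. the engine's request list is now idle.
 */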
static bool engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next, &engine->request_list, link) {
		if (!i915_gem_request_completed(request))
			return false;

		i915_gem_request_retire(request);
	}

	return true;
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (dev_priv->gt.active_engines == 0)
		return;

	GEM_BUG_ON(!dev_priv->gt.awake);

	for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
		if (engine_retire_requests(engine))
			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);

	if (dev_priv->gt.active_engines == 0)
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.idle_work,
				   msecs_to_jiffies(100));
}