drm/i915: Mark up the calling context for intel_wakeref_put()
drivers/gpu/drm/i915/gt/intel_gt_requests.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

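/*
 * Requests on a timeline are retired in order; stop at the first one that
 * cannot yet be retired.
 */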
static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

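/*
 * Flush each engine's submission tasklet before we inspect the timelines
 * for completed requests.
 */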
static void flush_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_submission(engine);
}

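/*
 * Retire completed requests on every active timeline. If timeout is
 * positive, wait up to that many jiffies for the last request on each
 * timeline; a negative timeout means wait uninterruptibly for -timeout
 * jiffies. Returns 0 once no timelines remain active, otherwise the
 * remaining timeout.
 */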
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long flags;
	bool interruptible;
	LIST_HEAD(free);

	interruptible = true;
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	flush_submission(gt); /* kick the ksoftirqd tasklets */

	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex))
			continue;

		intel_timeline_get(tl);
		GEM_BUG_ON(!tl->active_count);
		tl->active_count++; /* pin the list element */
		spin_unlock_irqrestore(&timelines->lock, flags);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);
			}
		}

		retire_requests(tl);

		spin_lock_irqsave(&timelines->lock, flags);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
		if (!--tl->active_count)
			list_del(&tl->link);

		mutex_unlock(&tl->mutex);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(tl->active_count);
			list_add(&tl->link, &free);
		}
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	return list_empty(&timelines->active_list) ? 0 : timeout;
}

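/*
 * Flush outstanding requests and wait for them to be retired, giving up
 * early on a pending signal. Returns 0 when done (or if the GT is already
 * asleep), -EINTR if interrupted by a signal, or a negative error code
 * propagated from waiting on a request.
 *
 * Illustrative caller sketch (not taken from this file; the one second
 * timeout is arbitrary):
 *
 *	int err = intel_gt_wait_for_idle(gt, HZ);
 *	if (err < 0)
 *		return err; // e.g. -EINTR
 */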
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout;
}

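/*
 * Background retirement worker: re-arms itself roughly once a second and
 * retires whatever requests have completed in the meantime.
 */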
static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
	intel_gt_retire_requests(gt);
}

void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

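/*
 * The retirement worker only needs to run while the GT is awake: parking
 * cancels the delayed work, unparking schedules it again.
 */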
void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}

void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}

void intel_gt_fini_requests(struct intel_gt *gt)
{
	/* Wait until the work is marked as finished before unloading! */
	cancel_delayed_work_sync(&gt->requests.retire_work);
}