// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

static bool retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			return false;

	/* And check nothing new was submitted */
	return !i915_active_fence_isset(&tl->last_request);
}
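
/*
 * Illustrative sketch, not part of the original file: retire_requests()
 * assumes tl->mutex is held and returns true only once the timeline is
 * completely idle. A minimal caller follows the pattern used by
 * engine_retire() below:
 *
 *	bool idle = false;
 *
 *	if (mutex_trylock(&tl->mutex)) {
 *		idle = retire_requests(tl);
 *		mutex_unlock(&tl->mutex);
 *	}
 */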

static bool engine_active(const struct intel_engine_cs *engine)
{
	return !list_empty(&engine->kernel_context->timeline->requests);
}

static bool flush_submission(struct intel_gt *gt, long timeout)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool active = false;

	if (!timeout)
		return false;

	if (!intel_gt_pm_is_awake(gt))
		return false;

	for_each_engine(engine, gt, id) {
		intel_engine_flush_submission(engine);

		/* Flush the background retirement and idle barriers */
		flush_work(&engine->retire_work);
		flush_delayed_work(&engine->wakeref.work);

		/* Is the idle barrier still outstanding? */
		active |= engine_active(engine);
	}

	return active;
}
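
/*
 * Note on the zero-timeout early return above: flush_work() and
 * flush_delayed_work() may sleep, so a caller that cannot afford to
 * wait passes timeout == 0 and flush_submission() does nothing at all
 * (this reading is inferred from the code, not stated by the authors).
 */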

static void engine_retire(struct work_struct *work)
{
	struct intel_engine_cs *engine =
		container_of(work, typeof(*engine), retire_work);
	struct intel_timeline *tl = xchg(&engine->retire, NULL);

	do {
		struct intel_timeline *next = xchg(&tl->retire, NULL);

		/*
		 * Our goal here is to retire _idle_ timelines as soon as
		 * possible (as they are idle, we do not expect userspace
		 * to be cleaning up anytime soon).
		 *
		 * If the timeline is currently locked, either it is being
		 * retired elsewhere or about to be!
		 */
		if (mutex_trylock(&tl->mutex)) {
			retire_requests(tl);
			mutex_unlock(&tl->mutex);
		}
		intel_timeline_put(tl);

		GEM_BUG_ON(!next);
		tl = ptr_mask_bits(next, 1);
	} while (tl);
}
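
/*
 * Sketch (illustrative only, not from the original file) of the tagged,
 * lock-free list consumed above: engine->retire is the head, and each
 * timeline's tl->retire is the next pointer with BIT(0) set to mean
 * "already on a retirement queue" (see add_retire() below):
 *
 *	node = xchg(&head, NULL);		// steal the whole chain
 *	while (node) {
 *		next = xchg(&node->next, NULL);	// clear the queued tag
 *		consume(node);
 *		node = ptr_mask_bits(next, 1);	// strip BIT(0) to advance
 *	}
 */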

static bool add_retire(struct intel_engine_cs *engine,
		       struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
	struct intel_timeline *first;

	/*
	 * We open-code a llist here to include the additional tag [BIT(0)]
	 * so that we know when the timeline is already on a
	 * retirement queue: either this engine or another.
	 */

	if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
		return false;

	intel_timeline_get(tl);
	first = READ_ONCE(engine->retire);
	do
		tl->retire = ptr_pack_bits(first, 1, 1);
	while (!try_cmpxchg(&engine->retire, &first, tl));

	return !first;
}
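
/*
 * The push in add_retire() is the producer side of that same list.
 * Claiming tl->retire with cmpxchg(NULL -> STUB) makes queueing
 * idempotent: only one engine ever owns a timeline's retirement.
 * Generic sketch (illustrative only; TAG is a hypothetical non-NULL
 * sentinel, and ptr_pack_bits(p, 1, 1) sets BIT(0)):
 *
 *	if (cmpxchg(&node->next, NULL, TAG))
 *		return false;		// already queued elsewhere
 *
 *	first = READ_ONCE(head);
 *	do
 *		node->next = ptr_pack_bits(first, 1, 1);
 *	while (!try_cmpxchg(&head, &first, node));
 *
 *	return !first;	// list was empty, so the caller kicks the worker
 */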

void intel_engine_add_retire(struct intel_engine_cs *engine,
			     struct intel_timeline *tl)
{
	/* We don't deal well with the engine disappearing beneath us */
	GEM_BUG_ON(intel_engine_is_virtual(engine));

	if (add_retire(engine, tl))
		schedule_work(&engine->retire_work);
}

void intel_engine_init_retire(struct intel_engine_cs *engine)
{
	INIT_WORK(&engine->retire_work, engine_retire);
}

void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
	flush_work(&engine->retire_work);
	GEM_BUG_ON(engine->retire);
}

long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
				      long *remaining_timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	LIST_HEAD(free);

	flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {
			active_count++; /* report busy to caller, try again? */
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				mutex_unlock(&tl->mutex);

				timeout = dma_fence_wait_timeout(fence,
								 true,
								 timeout);
				dma_fence_put(fence);

				/* Retirement is best effort */
				if (!mutex_trylock(&tl->mutex)) {
					active_count++;
					goto out_active;
				}
			}
		}

		if (!retire_requests(tl))
			active_count++;
		mutex_unlock(&tl->mutex);

out_active:	spin_lock(&timelines->lock);

		/* Resume list iteration after reacquiring spinlock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	if (flush_submission(gt, timeout)) /* Wait, there's more! */
		active_count++;

	if (remaining_timeout)
		*remaining_timeout = timeout;

	return active_count ? timeout : 0;
}
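
/*
 * Hedged usage sketch (an assumption about callers, not taken from this
 * file): since a non-zero return means some timeline was still busy, a
 * caller waiting for idle might loop until either the busy count or the
 * timeout drops to zero, keeping *remaining_timeout for any follow-up
 * wait:
 *
 *	long remaining = timeout;
 *
 *	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
 *							   &remaining)) > 0)
 *		cond_resched();
 */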

static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
	intel_gt_retire_requests(gt);
}
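
/*
 * Note: the handler re-arms itself before retiring, so the next tick is
 * scheduled a full period out rather than delayed by however long the
 * retire pass itself takes. The pattern in the abstract (illustrative
 * only):
 *
 *	static void tick(struct work_struct *work)
 *	{
 *		schedule_delayed_work(&dwork, period);	// re-arm first
 *		do_slow_work();				// then work
 *	}
 */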

void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}

void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}

void intel_gt_fini_requests(struct intel_gt *gt)
{
	/* Wait until the work is marked as finished before unloading! */
	cancel_delayed_work_sync(&gt->requests.retire_work);

	flush_work(&gt->watchdog.work);
}

void intel_gt_watchdog_work(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), watchdog.work);
	struct i915_request *rq, *rn;
	struct llist_node *first;

	first = llist_del_all(&gt->watchdog.list);
	if (!first)
		return;

	llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
		if (!i915_request_completed(rq)) {
			struct dma_fence *f = &rq->fence;

			pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
				  f->ops->get_driver_name(f),
				  f->ops->get_timeline_name(f),
				  f->seqno);
			i915_request_cancel(rq, -EINTR);
		}
		i915_request_put(rq);
	}
}
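
/*
 * Note: llist_del_all() atomically detaches the entire pending list, so
 * producers (elsewhere in the driver) can keep adding expired requests
 * with llist_add() while this worker cancels the batch it claimed.
 * Minimal consume-all sketch (illustrative only):
 *
 *	first = llist_del_all(&pending);
 *	llist_for_each_entry_safe(pos, n, first, link)
 *		handle(pos);
 */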