/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1)

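/*
 * Example (illustrative, not part of the original header): CE_TRACE()
 * prefixes an ENGINE_TRACE() message with the context's fence id, so a
 * call such as
 *
 *	CE_TRACE(ce, "pin_count:%d\n", atomic_read(&ce->pin_count));
 *
 * tags the trace line with both the engine and the context.
 */
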
struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

void i915_context_module_exit(void);
int i915_context_module_init(void);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

#define PARENT_SCRATCH_SIZE PAGE_SIZE

static inline bool intel_context_is_child(struct intel_context *ce)
{
	return !!ce->parallel.parent;
}

static inline bool intel_context_is_parent(struct intel_context *ce)
{
	return !!ce->parallel.number_children;
}

static inline bool intel_context_is_pinned(struct intel_context *ce);

static inline struct intel_context *
intel_context_to_parent(struct intel_context *ce)
{
	if (intel_context_is_child(ce)) {
		/*
		 * The parent holds a reference on the child, so it is always
		 * safe for the parent to access the child; but the child has
		 * a pointer to the parent without a reference. To ensure this
		 * is safe, the child should only access the parent pointer
		 * while the parent is pinned.
		 */
		GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));

		return ce->parallel.parent;
	} else {
		return ce;
	}
}

static inline bool intel_context_is_parallel(struct intel_context *ce)
{
	return intel_context_is_child(ce) || intel_context_is_parent(ce);
}

void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child);

#define for_each_child(parent, ce) \
	list_for_each_entry(ce, &(parent)->parallel.child_list, \
			    parallel.child_link)
#define for_each_child_safe(parent, ce, cn) \
	list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list, \
				 parallel.child_link)

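/*
 * Illustrative sketch (not from the original header) of walking the
 * children of a parallel parent context; the child list is only stable
 * while a reference to the parent is held:
 *
 *	struct intel_context *child;
 *
 *	for_each_child(parent, child)
 *		CE_TRACE(child, "child\n");
 */
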
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

static inline void intel_context_cancel_request(struct intel_context *ce,
						struct i915_request *rq)
{
	GEM_BUG_ON(!ce->ops->cancel_request);
	return ce->ops->cancel_request(ce, rq);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}

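/*
 * Typical locking pattern (an illustrative sketch, not from the original
 * header) for inspecting HW state that depends on the pinned status:
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce)) {
 *		// the context can be neither bound nor unbound here
 *	}
 *
 *	intel_context_unlock_pinned(ce);
 */
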
int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void __intel_context_do_unpin(struct intel_context *ce, int sub);

static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
	__intel_context_do_unpin(ce, 2);
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	if (!ce->ops->sched_disable) {
		__intel_context_do_unpin(ce, 1);
	} else {
		/*
		 * Move ownership of this pin to the scheduling disable,
		 * which is an async operation. When that operation
		 * completes, intel_context_sched_disable_unpin() above is
		 * called, potentially unpinning the context.
		 */
		while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
			if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
				ce->ops->sched_disable(ce);
				break;
			}
		}
	}
}

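/*
 * Canonical pin/use/unpin flow (an illustrative sketch, not from the
 * original header); every intel_context_unpin() must pair with a
 * successful intel_context_pin():
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	// ... submit work against the pinned context ...
 *
 *	intel_context_unpin(ce);
 */
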
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
		       test_bit(CONTEXT_IS_PARKING, &ce->flags));
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

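/*
 * References follow the usual kref pattern (an illustrative sketch, not
 * from the original header): take a reference before publishing the
 * context to another owner, and drop it when that owner is done.
 *
 *	struct intel_context *ref = intel_context_get(ce);
 *
 *	// ... hand 'ref' over; the holder later calls ...
 *	intel_context_put(ref);
 */
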
static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	if (intel_context_is_parent(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex, 0);
	else if (intel_context_is_child(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      ce->parallel.child_index + 1);
	else
		err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}

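/*
 * Illustrative sketch (not from the original header) of serialising
 * against the context's timeline; note the interruptible lock can fail:
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *
 *	// ... emit requests along ce->timeline ...
 *
 *	intel_context_timeline_unlock(tl);
 */
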
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

struct i915_request *
intel_context_find_active_request(struct intel_context *ce);

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);

static inline bool intel_context_is_schedulable(const struct intel_context *ce)
{
	return !test_bit(CONTEXT_EXITING, &ce->flags) &&
	       !test_bit(CONTEXT_BANNED, &ce->flags);
}

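/*
 * Illustrative check (not from the original header) before queuing more
 * work: a banned or exiting context should not receive new requests.
 *
 *	if (!intel_context_is_schedulable(ce))
 *		return -EIO;
 */
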
static inline bool intel_context_is_exiting(const struct intel_context *ce)
{
	return test_bit(CONTEXT_EXITING, &ce->flags);
}

static inline bool intel_context_set_exiting(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}

bool intel_context_exit_nonpersistent(struct intel_context *ce,
				      struct i915_request *rq);

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

u64 intel_context_get_total_runtime_ns(const struct intel_context *ce);
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);

static inline u64 intel_context_clock(void)
{
	/* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
	return ktime_get_raw_fast_ns();
}

#endif /* __INTEL_CONTEXT_H__ */