/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

void i915_context_module_exit(void);
int i915_context_module_init(void);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

#define PARENT_SCRATCH_SIZE	PAGE_SIZE

static inline bool intel_context_is_child(struct intel_context *ce)
{
	return !!ce->parallel.parent;
}

static inline bool intel_context_is_parent(struct intel_context *ce)
{
	return !!ce->parallel.number_children;
}

static inline bool intel_context_is_pinned(struct intel_context *ce);

static inline struct intel_context *
intel_context_to_parent(struct intel_context *ce)
{
	if (intel_context_is_child(ce)) {
		/*
		 * The parent holds a reference count on the child, so it is
		 * always safe for the parent to access the child; the child,
		 * however, has a pointer to the parent without a reference.
		 * To keep this safe, the child should only access the parent
		 * pointer while the parent is pinned.
		 */
		GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));

		return ce->parallel.parent;
	} else {
		return ce;
	}
}

static inline bool intel_context_is_parallel(struct intel_context *ce)
{
	return intel_context_is_child(ce) || intel_context_is_parent(ce);
}

void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child);

#define for_each_child(parent, ce) \
	list_for_each_entry(ce, &(parent)->parallel.child_list, \
			    parallel.child_link)
#define for_each_child_safe(parent, ce, cn) \
	list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list, \
				 parallel.child_link)

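/*
 * Illustrative sketch (not part of this header): walking the children of a
 * parallel submission from the parent side. The parent holds references on
 * its children, so this iteration is always safe; visit_children() and
 * do_something_with() are hypothetical helpers.
 *
 *	static void visit_children(struct intel_context *parent)
 *	{
 *		struct intel_context *child;
 *
 *		GEM_BUG_ON(!intel_context_is_parent(parent));
 *		for_each_child(parent, child)
 *			do_something_with(child);
 *	}
 */
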
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

static inline void intel_context_cancel_request(struct intel_context *ce,
						struct i915_request *rq)
{
	GEM_BUG_ON(!ce->ops->cancel_request);
	return ce->ops->cancel_request(ce, rq);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
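
/*
 * Illustrative sketch (not part of this header): querying the pinned status
 * under the pin_mutex so the answer cannot change while it is examined.
 * query_pinned() is a hypothetical helper.
 *
 *	static bool query_pinned(struct intel_context *ce)
 *	{
 *		bool pinned;
 *
 *		if (intel_context_lock_pinned(ce))
 *			return false; // interrupted while waiting for the lock
 *
 *		pinned = intel_context_is_pinned(ce);
 *		intel_context_unlock_pinned(ce);
 *
 *		return pinned;
 *	}
 */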

int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void __intel_context_do_unpin(struct intel_context *ce, int sub);

static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
	__intel_context_do_unpin(ce, 2);
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	if (!ce->ops->sched_disable) {
		__intel_context_do_unpin(ce, 1);
	} else {
		/*
		 * Move ownership of this pin to the scheduling disable,
		 * which is an async operation. When that operation completes,
		 * intel_context_sched_disable_unpin() above is called,
		 * potentially unpinning the context.
		 */
		while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
			if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
				ce->ops->sched_disable(ce);
				break;
			}
		}
	}
}
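
/*
 * Illustrative sketch (not part of this header): the usual pin/use/unpin
 * pattern around request construction. use_context() and build_request()
 * are hypothetical; build_request() stands in for whatever work is done
 * while the context is pinned.
 *
 *	static int use_context(struct intel_context *ce)
 *	{
 *		int err;
 *
 *		err = intel_context_pin(ce);
 *		if (err)
 *			return err;
 *
 *		err = build_request(ce);
 *
 *		intel_context_unpin(ce);
 *		return err;
 *	}
 */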

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
		       test_bit(CONTEXT_IS_PARKING, &ce->flags));
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
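
/*
 * Illustrative sketch (not part of this header): keeping a context alive
 * across deferred work with get/put. struct my_async_work and my_defer()
 * are hypothetical.
 *
 *	struct my_async_work {
 *		struct work_struct base;
 *		struct intel_context *ce;
 *	};
 *
 *	static void my_defer(struct my_async_work *w, struct intel_context *ce)
 *	{
 *		w->ce = intel_context_get(ce);	// reference held for the worker
 *		queue_work(system_wq, &w->base);
 *	}
 *
 * The matching intel_context_put(w->ce) is then issued from the work item
 * once it has finished with the context.
 */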

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	if (intel_context_is_parent(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex, 0);
	else if (intel_context_is_child(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      ce->parallel.child_index + 1);
	else
		err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}
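
/*
 * Illustrative sketch (not part of this header): taking the timeline mutex
 * around request emission. Note that the unlock takes the timeline returned
 * by the lock, not the context. emit_on_timeline() is hypothetical.
 *
 *	static int emit_on_timeline(struct intel_context *ce)
 *	{
 *		struct intel_timeline *tl;
 *
 *		tl = intel_context_timeline_lock(ce);
 *		if (IS_ERR(tl))
 *			return PTR_ERR(tl);
 *
 *		... build and emit requests against tl here ...
 *
 *		intel_context_timeline_unlock(tl);
 *		return 0;
 *	}
 */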

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

struct i915_request *
intel_context_find_active_request(struct intel_context *ce);

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_ban(struct intel_context *ce,
				     struct i915_request *rq)
{
	bool ret = intel_context_set_banned(ce);

	trace_intel_context_ban(ce);
	if (ce->ops->ban)
		ce->ops->ban(ce, rq);

	return ret;
}

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return READ_ONCE(ce->runtime.total) * period;
}

static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}

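/*
 * Illustrative sketch (not part of this header): sampling a context's
 * accumulated GPU runtime, converted from command-streamer ticks to
 * nanoseconds by the helpers above. report_runtime() is hypothetical.
 *
 *	static void report_runtime(struct intel_context *ce)
 *	{
 *		u64 total = intel_context_get_total_runtime_ns(ce);
 *		u64 avg = intel_context_get_avg_runtime_ns(ce);
 *
 *		pr_info("context %llx: total %lluns, avg %lluns\n",
 *			ce->timeline->fence_context, total, avg);
 *	}
 */
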
#endif /* __INTEL_CONTEXT_H__ */