/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <linux/thread_info.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>

#include <linux/cache.h>
#include <linux/irqflags_types.h>
#include <linux/smp_types.h>
#include <linux/pid_types.h>
#include <linux/sem_types.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
#include <linux/mutex_types.h>
#include <linux/plist_types.h>
#include <linux/hrtimer_types.h>
#include <linux/timer_types.h>
#include <linux/seccomp_types.h>
#include <linux/nodemask_types.h>
#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
#include <uapi/linux/rseq.h>
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
#include <asm/kmap_size.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct user_event_mm;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->__state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->__state: */
#define TASK_RUNNING			0x00000000
#define TASK_INTERRUPTIBLE		0x00000001
#define TASK_UNINTERRUPTIBLE		0x00000002
#define __TASK_STOPPED			0x00000004
#define __TASK_TRACED			0x00000008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x00000010
#define EXIT_ZOMBIE			0x00000020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->__state again: */
#define TASK_PARKED			0x00000040
#define TASK_DEAD			0x00000080
#define TASK_WAKEKILL			0x00000100
#define TASK_WAKING			0x00000200
#define TASK_NOLOAD			0x00000400
#define TASK_NEW			0x00000800
#define TASK_RTLOCK_WAIT		0x00001000
#define TASK_FREEZABLE			0x00002000
#define __TASK_FREEZABLE_UNSAFE		(0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
#define TASK_FROZEN			0x00008000
#define TASK_STATE_MAX			0x00010000

#define TASK_ANY			(TASK_STATE_MAX-1)

/*
 * DO NOT ADD ANY NEW USERS !
 */
#define TASK_FREEZABLE_UNSAFE		(TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			__TASK_TRACED

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)

#define task_is_traced(task)		((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
#define task_is_stopped(task)		((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value)				\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_special_state_change(state_value)			\
	do {								\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_rtlock_wait_set_state()					\
	do {								\
		current->saved_state_change = current->task_state_change;\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_rtlock_wait_restore_state()				\
	do {								\
		current->task_state_change = current->saved_state_change;\
	} while (0)

#else
# define debug_normal_state_change(cond)	do { } while (0)
# define debug_special_state_change(cond)	do { } while (0)
# define debug_rtlock_wait_set_state()		do { } while (0)
# define debug_rtlock_wait_restore_state()	do { } while (0)
#endif

/*
 * set_current_state() includes a barrier so that the write of current->__state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (CONDITION)
 *	   break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * CONDITION test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   CONDITION = 1;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
 * accessing p->__state.
 *
 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	do {								\
		debug_normal_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
	} while (0)

#define set_current_state(state_value)					\
	do {								\
		debug_normal_state_change((state_value));		\
		smp_store_mb(current->__state, (state_value));		\
	} while (0)

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
									\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		debug_special_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

/*
 * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
 *
 * RT's spin/rwlock substitutions are state preserving. The state of the
 * task when blocking on the lock is saved in task_struct::saved_state and
 * restored after the lock has been acquired. These operations are
 * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
 * lock related wakeups while the task is blocked on the lock are
 * redirected to operate on task_struct::saved_state to ensure that these
 * are not dropped. On restore task_struct::saved_state is set to
 * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
 *
 * The lock operation looks like this:
 *
 *	current_save_and_set_rtlock_wait_state();
 *	for (;;) {
 *		if (try_lock())
 *			break;
 *		raw_spin_unlock_irq(&lock->wait_lock);
 *		schedule_rtlock();
 *		raw_spin_lock_irq(&lock->wait_lock);
 *		set_current_state(TASK_RTLOCK_WAIT);
 *	}
 *	current_restore_rtlock_saved_state();
 */
#define current_save_and_set_rtlock_wait_state()			\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		current->saved_state = current->__state;		\
		debug_rtlock_wait_set_state();				\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define current_restore_rtlock_saved_state()				\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		debug_rtlock_wait_restore_state();			\
		WRITE_ONCE(current->__state, current->saved_state);	\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define get_current_state()	READ_ONCE(current->__state)

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};
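
/*
 * Illustrative note (not part of the original header): task_struct::comm
 * below is TASK_COMM_LEN bytes including the terminating NUL, so the usual
 * way to read it safely is via the get_task_comm() helper declared further
 * down in this header, e.g.:
 *
 *	char buf[TASK_COMM_LEN];
 *
 *	get_task_comm(buf, current);
 */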

extern void sched_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
#ifdef CONFIG_PREEMPT_RT
 extern void schedule_rtlock(void);
#endif

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

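/*
 * Illustrative sketch (not part of the original header): the timeout
 * variants above combine the state change with schedule_timeout(), so a
 * caller waiting for a hypothetical `done` flag for up to two seconds can
 * loop on the remaining jiffies they return:
 *
 *	long remaining = 2 * HZ;
 *
 *	while (!READ_ONCE(done) && remaining)
 *		remaining = schedule_timeout_interruptible(remaining);
 *
 * schedule_timeout_interruptible() sets TASK_INTERRUPTIBLE before calling
 * schedule_timeout() and returns the jiffies left, so the condition is
 * re-checked on every wakeup, as the set_current_state() comment above
 * prescribes.
 */
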
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64			utime;
	u64			stime;
	raw_spinlock_t		lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as guests in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_param {
	int sched_priority;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)

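/*
 * Illustrative arithmetic (not part of the original header): with
 * SCHED_FIXEDPOINT_SHIFT == 10 both scales are 1024, i.e. the value 1.0
 * is stored as 1024. A CPU running at 50% of its capacity is therefore
 * represented as 0.5 * 1024 = 512, and 25% utilization as 256.
 */
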
struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/*
 * The load/runnable/util_avg accumulates an infinite geometric series
 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * [runnable_avg definition]
 *
 *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where runnable% is the time ratio that a sched_entity is runnable and
 * running% the time ratio that a sched_entity is running.
 *
 * For cfs_rq, they are the aggregated values of all runnable and blocked
 * sched_entities.
 *
 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
 * capacity scaling. The scaling is done through the rq_clock_pelt that is used
 * for computing those signals (see update_rq_clock_pelt())
 *
 * N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetics, we therefore scale them
 * to as large a range as necessary. This is for example reflected by
 * util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_avg;
	unsigned long			util_avg;
	unsigned int			util_est;
} ____cacheline_aligned;

/*
 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
 * updates. When a task is dequeued, its util_est should not be updated if its
 * util_avg has not been updated in the meantime.
 * This information is mapped into the MSB bit of util_est at dequeue time.
 * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
 * it is safe to use MSB.
 */
#define UTIL_EST_WEIGHT_SHIFT		2
#define UTIL_AVG_UNCHANGED		0x80000000

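/*
 * Illustrative reading of the PELT definitions above (not part of the
 * original header): a task that has been running roughly 50% of the time
 * in the recent past converges towards util_avg of about
 * 0.5 * SCHED_CAPACITY_SCALE = 512, while a task that is always running
 * saturates at 1024.
 */
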
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	s64				sum_block_runtime;

	s64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;

#ifdef CONFIG_SCHED_CORE
	u64				core_forceidle_sum;
#endif
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	u64				deadline;
	u64				min_vruntime;

	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				prev_sum_exec_runtime;
	u64				vruntime;
	s64				vlag;
	u64				slice;

	u64				nr_migrations;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
	/* cached value of my_q->h_nr_running */
	unsigned long			runnable_weight;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance */
	u64				dl_deadline;	/* Relative deadline of each instance */
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period */
	u64				dl_density;	/* dl_runtime / dl_deadline */

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance */
	u64				deadline;	/* Absolute deadline for this instance */
	unsigned int			flags;		/* Specifying the scheduler behaviour */

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;
	unsigned int			dl_server         : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;

	/*
	 * Bits for DL-server functionality. Also see the comment near
	 * dl_server_update().
	 *
	 * @rq the runqueue this server is for
	 *
	 * @server_has_tasks() returns true if @server_pick() returns a
	 * runnable task.
	 */
	struct rq			*rq;
	dl_server_has_tasks_f		server_has_tasks;
	dl_server_pick_f		server_pick;

#ifdef CONFIG_RT_MUTEXES
	/*
	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
	 * pi_se points to the donor, otherwise points to the dl_se it belongs
	 * to (the original one/itself).
	 */
	struct sched_dl_entity *pi_se;
#endif
};

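/*
 * Illustrative only (not part of the original header): the three original
 * parameters above come from sched_setattr(), which enforces
 * sched_runtime <= sched_deadline <= sched_period, all in nanoseconds.
 * A typical periodic task might ask for 10ms of runtime every 30ms, due
 * 20ms into each period, i.e. dl_runtime = 10 * NSEC_PER_MSEC,
 * dl_deadline = 20 * NSEC_PER_MSEC and dl_period = 30 * NSEC_PER_MSEC.
 */
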
#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value assigned,
 * which can be different from the clamp value "requested" from user-space.
 * This allows to know a task is refcounted in the rq's bucket corresponding
 * to the "effective" bucket_id.
 *
 * The user_defined bit is set whenever a task has got a task-specific clamp
 * value requested from userspace, i.e. the system defaults apply to this task
 * just as a restriction. This allows to relax default clamps when a less
 * restrictive task-specific value has been requested, thus allowing to
 * implement a "nice" semantic. For example, a task running with a 20%
 * default boost can still drop its own boosting to 0%.
 */
struct uclamp_se {
	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
	unsigned int active		: 1;
	unsigned int user_defined	: 1;
};
#endif /* CONFIG_UCLAMP_TASK */

union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_hint; /* Hint for performance. */
		u8			need_mb; /* Readers need smp_mb(). */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
	int				idx;
	pte_t				pteval[KM_MAX_IDX];
#endif
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	unsigned int			__state;

	/* saved state for "spinlock sleepers" */
	unsigned int			saved_state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	refcount_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_MEM_ALLOC_PROFILING
	struct alloc_tag		*alloc_tag;
#endif

#ifdef CONFIG_SMP
	int				on_cpu;
	struct __call_single_node	wake_entry;
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int				recent_used_cpu;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	struct sched_entity		se;
	struct sched_rt_entity		rt;
	struct sched_dl_entity		dl;
	struct sched_dl_entity		*dl_server;
	const struct sched_class	*sched_class;

#ifdef CONFIG_SCHED_CORE
	struct rb_node			core_node;
	unsigned long			core_cookie;
	unsigned int			core_occupation;
#endif

#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif


#ifdef CONFIG_UCLAMP_TASK
	/*
	 * Clamp values requested for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se		uclamp_req[UCLAMP_CNT];
	/*
	 * Effective clamp values used for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se		uclamp[UCLAMP_CNT];
#endif

	struct sched_statistics		stats;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	unsigned long			max_allowed_capacity;
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			*user_cpus_ptr;
	cpumask_t			cpus_mask;
	void				*migration_pending;
#ifdef CONFIG_SMP
	unsigned short			migration_disabled;
#endif
	unsigned short			migration_flags;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
	int				rcu_tasks_exit_cpu;
	struct list_head		rcu_tasks_exit_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_TRACE_RCU
	int				trc_reader_nesting;
	int				trc_ipi_to_cpu;
	union rcu_special		trc_reader_special;
	struct list_head		trc_holdout_list;
	struct list_head		trc_blkd_node;
	int				trc_blkd_cpu;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;
	struct address_space		*faults_disabled_mapping;

	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;

	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/*
	 * This field must not be in the scheduler word above due to wakelist
	 * queueing no longer being serialized by p->on_cpu. However:
	 *
	 * p->XXX = X;			ttwu()
	 * schedule()			  if (p->on_rq && ..) // false
	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
	 *   deactivate_task()		      ttwu_queue_wakelist())
	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
	 *
	 * guarantees all stores of 'current' are visible before
	 * ->sched_remote_wakeup gets used, so it can be in this word.
	 */
	unsigned			sched_remote_wakeup:1;
#ifdef CONFIG_RT_MUTEXES
	unsigned			sched_rt_mutex:1;
#endif

	/* Bit to tell TOMOYO we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			in_user_fault:1;
#endif
#ifdef CONFIG_LRU_GEN
	/* whether the LRU algorithm may apply to this access */
	unsigned			in_lru_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
	/* task is frozen/stopped (used by the cgroup freezer) */
	unsigned			frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	unsigned			use_memdelay:1;
#endif
#ifdef CONFIG_PSI
	/* Stalled due to lack of memory */
	unsigned			in_memstall:1;
#endif
#ifdef CONFIG_PAGE_OWNER
	/* Used by page_owner=on to detect recursion in page tracking. */
	unsigned			in_page_owner:1;
#endif
#ifdef CONFIG_EVENTFD
	/* Recursion prevention for eventfd_signal() */
	unsigned			in_eventfd:1;
#endif
#ifdef CONFIG_ARCH_HAS_CPU_PASID
	unsigned			pasid_activated:1;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned			reported_split_lock:1;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
	/* delay due to memory thrashing */
	unsigned                        in_thrashing:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid			*thread_pid;
	struct hlist_node		pid_links[PIDTYPE_MAX];
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	/* PF_KTHREAD | PF_IO_WORKER */
	void				*worker_private;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				start_boottime;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
	struct posix_cputimers		posix_cputimers;

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
	struct posix_cputimers_work	posix_cputimers_work;
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

#ifdef CONFIG_KEYS
	/* Cached requested key. */
	struct key			*cached_requested_key;
#endif

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
	unsigned long			last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

#ifdef CONFIG_IO_URING
	struct io_uring_task		*io_uring;
#endif

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct __rcu		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context		*audit_context;
#endif
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;
	struct syscall_user_dispatch	syscall_dispatch;

	/* Thread group tracking: */
	u64				parent_exec_id;
	u64				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int				non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events		irqtrace;
	unsigned int			hardirq_threaded;
	u64				hardirq_chain_key;
	int				softirqs_enabled;
	int				softirq_context;
	int				irq_config;
#endif
#ifdef CONFIG_PREEMPT_RT
	int				softirq_disable_cnt;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

	/* Stack plugging: */
	struct blk_plug			*plug;

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct io_context		*io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control		*capture_control;
#endif
	/* Ptrace state: */
	unsigned long			ptrace_message;
	kernel_siginfo_t		*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_PSI
	/* Pressure stall state */
	unsigned int			psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_spinlock_t		mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32				closid;
	u32				rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user	*robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
	struct mutex			futex_exit_mutex;
	unsigned int			futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context	*perf_event_ctxp;
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long			preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
	short				il_prev;
	u8				il_weight;
	short				pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	/*
	 * This pointer is only modified for current in syscall and
	 * pagefault context (and for tasks being destroyed), so it can be read
	 * from any of the following contexts:
	 *  - RCU read-side critical section
	 *  - current->numa_group from everywhere
	 *  - task's runqueue locked, task not running
	 */
	struct numa_group __rcu		*numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long			numa_faults_locality[3];

	unsigned long			numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_RSEQ
	struct rseq __user		*rseq;
	u32				rseq_len;
	u32				rseq_sig;
	/*
	 * RmW on rseq_event_mask must be performed atomically
	 * with respect to preemption.
	 */
	unsigned long			rseq_event_mask;
#endif

#ifdef CONFIG_SCHED_MM_CID
	int				mm_cid;		/* Current cid in mm */
	int				last_mm_cid;	/* Most recent cid in mm */
	int				migrate_from_cpu;
	int				mm_cid_active;	/* Whether cid bitmap is active */
	struct callback_head		cid_work;
#endif

	struct tlbflush_unmap_batch	tlb_ubc;

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info		*splice_pipe;

	struct page_frag		task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info		*delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int				make_it_fail;
	unsigned int			fail_nth;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int				nr_dirtied;
	int				nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long			dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int				latency_record_count;
	struct latency_record		latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64				timer_slack_ns;
	u64				default_timer_slack_ns;

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	unsigned int			kasan_depth;
#endif

#ifdef CONFIG_KCSAN
	struct kcsan_ctx		kcsan_ctx;
#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events		kcsan_save_irqtrace;
#endif
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	int				kcsan_stack_depth;
#endif
#endif

#ifdef CONFIG_KMSAN
	struct kmsan_ctx		kmsan_ctx;
#endif

#if IS_ENABLED(CONFIG_KUNIT)
	struct kunit			*kunit_test;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int				curr_ret_stack;
	int				curr_ret_depth;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack		*ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long		ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t			trace_overrun;

	/* Pause tracing: */
	atomic_t			tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	/* Bitmask and counter of trace recursion: */
	unsigned long			trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* See kernel/kcov.c for more details. */

	/* Coverage collection mode enabled for this task (0 if disabled): */
	unsigned int			kcov_mode;

	/* Size of the kcov_area: */
	unsigned int			kcov_size;

	/* Buffer for coverage collection: */
	void				*kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov			*kcov;

	/* KCOV common handle for remote coverage collection: */
	u64				kcov_handle;

	/* KCOV sequence number: */
	int				kcov_sequence;

	/* Collect coverage from softirq context: */
	unsigned int			kcov_softirq;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup		*memcg_in_oom;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int			memcg_nr_pages_over_high;

	/* Used by memcontrol for targeted memcg charge: */
	struct mem_cgroup		*active_memcg;
#endif

#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup		*objcg;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct gendisk			*throttle_disk;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task		*utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int			sequential_io;
	unsigned int			sequential_io_avg;
#endif
	struct kmap_ctrl		kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long			task_state_change;
# ifdef CONFIG_PREEMPT_RT
	unsigned long			saved_state_change;
# endif
#endif
	struct rcu_head			rcu;
	refcount_t			rcu_users;
	int				pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct		*oom_reaper_list;
	struct timer_list		oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct		*stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	refcount_t			stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int patch_state;
#endif
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void				*security;
#endif
#ifdef CONFIG_BPF_SYSCALL
	/* Used by BPF task local storage */
	struct bpf_local_storage __rcu	*bpf_storage;
	/* Used for BPF run context */
	struct bpf_run_ctx		*bpf_ctx;
#endif

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	unsigned long			lowest_stack;
	unsigned long			prev_lowest_stack;
#endif

#ifdef CONFIG_X86_MCE
	void __user			*mce_vaddr;
	__u64				mce_kflags;
	u64				mce_addr;
	__u64				mce_ripv : 1,
					mce_whole_page : 1,
					__mce_reserved : 62;
	struct callback_head		mce_kill_me;
	int				mce_count;
#endif

#ifdef CONFIG_KRETPROBES
	struct llist_head               kretprobe_instances;
#endif
#ifdef CONFIG_RETHOOK
	struct llist_head               rethooks;
#endif

#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
	/*
	 * If L1D flush is supported on mm context switch
	 * then we use this callback head to queue kill work
	 * to kill tasks that are not running on SMT disabled
	 * cores
	 */
	struct callback_head		l1d_flush_kill;
#endif

#ifdef CONFIG_RV
	/*
	 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
	 * If we find justification for more monitors, we can think
	 * about adding more or developing a dynamic method. So far,
	 * none of these are justified.
	 */
	union rv_task_monitor		rv[RV_PER_TASK_MONITORS];
#endif

#ifdef CONFIG_USER_EVENTS
	struct user_event_mm		*user_event_mm;
#endif

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

#define TASK_REPORT_IDLE	(TASK_REPORT + 1)
#define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)

static inline unsigned int __task_state_index(unsigned int tsk_state,
					      unsigned int tsk_exit_state)
{
	unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;

	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

	if ((tsk_state & TASK_IDLE) == TASK_IDLE)
		state = TASK_REPORT_IDLE;

	/*
	 * We're lying here, but rather than expose a completely new task state
	 * to userspace, we can make this appear as if the task has gone through
	 * a regular rt_mutex_lock() call.
	 */
	if (tsk_state & TASK_RTLOCK_WAIT)
		state = TASK_UNINTERRUPTIBLE;

	return fls(state);
}

static inline unsigned int task_state_index(struct task_struct *tsk)
{
	return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
}

static inline char task_index_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZPI";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

	return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}
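
/*
 * Illustrative only (not part of the original header): with the TASK_REPORT
 * bit ordering above, task_state_to_char() yields 'R' for TASK_RUNNING,
 * 'S' for TASK_INTERRUPTIBLE, 'D' for TASK_UNINTERRUPTIBLE, 'T' for stopped,
 * 't' for traced, 'X'/'Z' for the exit states, 'P' for TASK_PARKED and
 * 'I' for TASK_IDLE, the same letters reported by ps(1) and
 * /proc/<pid>/stat.
 */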
1616
9ec52099
CLG
1617extern struct pid *cad_pid;
1618
1da177e4
LT
1619/*
1620 * Per process flags
1621 */
01ccf592 1622#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
5eca1c10
IM
1623#define PF_IDLE 0x00000002 /* I am an IDLE thread */
1624#define PF_EXITING 0x00000004 /* Getting shut down */
92307383 1625#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
01ccf592 1626#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
5eca1c10
IM
1627#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1628#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
1629#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
1630#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1631#define PF_DUMPCORE 0x00000200 /* Dumped core */
1632#define PF_SIGNALED 0x00000400 /* Killed by a signal */
cfb837e8 1633#define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */
1634#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
1635#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
54e6842d 1636#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
5eca1c10 1637#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
fb04563d 1638#define PF__HOLE__00010000 0x00010000
7dea19f9 1639#define PF_KSWAPD 0x00020000 /* I am kswapd */
 1640#define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nofs_save() */
1641#define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
1642#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
1643 * I am cleaning dirty pages from some other bdi. */
1644#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1645#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
1646#define PF_MEMALLOC_NORECLAIM 0x00800000 /* All allocation requests will clear __GFP_DIRECT_RECLAIM */
1647#define PF_MEMALLOC_NOWARN 0x01000000 /* All allocation requests will inherit __GFP_NOWARN */
fb04563d 1648#define PF__HOLE__02000000 0x02000000
3bd37062 1649#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
5eca1c10 1650#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1651#define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning.
1652 * See memalloc_pin_save() */
06b23f92 1653#define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */
fb04563d 1654#define PF__HOLE__40000000 0x40000000
5eca1c10 1655#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
1656
1657/*
1658 * Only the _current_ task can read/write to tsk->flags, but other
1659 * tasks can access tsk->flags in readonly mode for example
1660 * with tsk_used_math (like during threaded core dumping).
1661 * There is however an exception to this rule during ptrace
1662 * or during fork: the ptracer task is allowed to write to the
1663 * child->flags of its traced child (same goes for fork, the parent
1664 * can write to the child->flags), because we're guaranteed the
1665 * child is not running and in turn not changing child->flags
1666 * at the same time the parent does it.
1667 */
1668#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1669#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1670#define clear_used_math() clear_stopped_child_used_math(current)
1671#define set_used_math() set_stopped_child_used_math(current)
1672
1673#define conditional_stopped_child_used_math(condition, child) \
1674 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1675
1676#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1677
1678#define copy_to_stopped_child_used_math(child) \
1679 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
5eca1c10 1680
1da177e4 1681/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1682#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1683#define used_math() tsk_used_math(current)
1da177e4 1684
83d40a61 1685static __always_inline bool is_percpu_thread(void)
1686{
1687#ifdef CONFIG_SMP
1688 return (current->flags & PF_NO_SETAFFINITY) &&
1689 (current->nr_cpus_allowed == 1);
1690#else
1691 return true;
1692#endif
1693}
1694
1d4457f9 1695/* Per-process atomic flags. */
1696#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
1697#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1698#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1699#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
1700#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
1701#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
1702#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
71368af9 1703#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */
1d4457f9 1704
1705#define TASK_PFA_TEST(name, func) \
1706 static inline bool task_##func(struct task_struct *p) \
1707 { return test_bit(PFA_##name, &p->atomic_flags); }
5eca1c10 1708
1709#define TASK_PFA_SET(name, func) \
1710 static inline void task_set_##func(struct task_struct *p) \
1711 { set_bit(PFA_##name, &p->atomic_flags); }
5eca1c10 1712
1713#define TASK_PFA_CLEAR(name, func) \
1714 static inline void task_clear_##func(struct task_struct *p) \
1715 { clear_bit(PFA_##name, &p->atomic_flags); }
1716
1717TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1718TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1d4457f9 1719
1720TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1721TASK_PFA_SET(SPREAD_PAGE, spread_page)
1722TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1723
1724TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1725TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1726TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1d4457f9 1727
1728TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1729TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1730TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1731
1732TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1733TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1734TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1735
1736TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1737TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1738
1739TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1740TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1741TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1742
1743TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1744TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1745
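/*
 * Illustrative sketch (not part of this header): the TASK_PFA_*() macros
 * above expand into helpers such as task_no_new_privs() and
 * task_set_no_new_privs(). A typical consumer only needs the generated
 * test helper; the function name below is made up for the example.
 */
static inline bool example_may_gain_privs(struct task_struct *p)
{
	/* Once PFA_NO_NEW_PRIVS is set it is never cleared for the task's lifetime. */
	return !task_no_new_privs(p);
}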
5eca1c10 1746static inline void
717a94b5 1747current_restore_flags(unsigned long orig_flags, unsigned long flags)
907aed48 1748{
1749 current->flags &= ~flags;
1750 current->flags |= orig_flags & flags;
1751}
1752
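/*
 * Illustrative sketch (not part of this header): the save/set/restore pattern
 * that current_restore_flags() exists for, mirroring what the
 * memalloc_*_save()/restore() helpers in <linux/sched/mm.h> do. The
 * example_* names are made up for the example.
 */
static inline unsigned int example_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;	/* allocations now behave as if GFP_NOIO */
	return flags;
}

static inline void example_noio_restore(unsigned int flags)
{
	/* Restore only the PF_MEMALLOC_NOIO bit to its previously saved value. */
	current_restore_flags(flags, PF_MEMALLOC_NOIO);
}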
5eca1c10 1753extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
2ef269ef 1754extern int task_can_attach(struct task_struct *p);
1755extern int dl_bw_alloc(int cpu, u64 dl_bw);
1756extern void dl_bw_free(int cpu, u64 dl_bw);
1da177e4 1757#ifdef CONFIG_SMP
1758
1759/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
5eca1c10 1760extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1761
1762/**
1763 * set_cpus_allowed_ptr - set CPU affinity mask of a task
1764 * @p: the task
1765 * @new_mask: CPU affinity mask
1766 *
1767 * Return: zero if successful, or a negative error code
1768 */
5eca1c10 1769extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1770extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1771extern void release_user_cpus_ptr(struct task_struct *p);
234b8ab6 1772extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1773extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1774extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1da177e4 1775#else
5eca1c10 1776static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1777{
1778}
5eca1c10 1779static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1da177e4 1780{
96f874e2 1781 if (!cpumask_test_cpu(0, new_mask))
1782 return -EINVAL;
1783 return 0;
1784}
1785static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1786{
1787 if (src->user_cpus_ptr)
1788 return -EINVAL;
1789 return 0;
1790}
1791static inline void release_user_cpus_ptr(struct task_struct *p)
1792{
1793 WARN_ON(p->user_cpus_ptr);
1794}
1795
1796static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1797{
1798 return 0;
1799}
1da177e4 1800#endif
e0ad9556 1801
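/*
 * Illustrative sketch (not part of this header): restricting a task to a
 * single CPU with set_cpus_allowed_ptr(). cpumask_of() yields a mask with
 * only @cpu set; a negative return value means the new affinity was
 * rejected. The function name is made up for the example.
 */
static inline int example_bind_task_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}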
fa93384f 1802extern int yield_to(struct task_struct *p, bool preempt);
1803extern void set_user_nice(struct task_struct *p, long nice);
1804extern int task_prio(const struct task_struct *p);
5eca1c10 1805
1806/**
1807 * task_nice - return the nice value of a given task.
1808 * @p: the task in question.
1809 *
1810 * Return: The nice value [ -20 ... 0 ... 19 ].
1811 */
1812static inline int task_nice(const struct task_struct *p)
1813{
1814 return PRIO_TO_NICE((p)->static_prio);
1815}
5eca1c10 1816
1817extern int can_nice(const struct task_struct *p, const int nice);
1818extern int task_curr(const struct task_struct *p);
1da177e4 1819extern int idle_cpu(int cpu);
943d355d 1820extern int available_idle_cpu(int cpu);
1821extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1822extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1823extern void sched_set_fifo(struct task_struct *p);
1824extern void sched_set_fifo_low(struct task_struct *p);
1825extern void sched_set_normal(struct task_struct *p, int nice);
5eca1c10 1826extern int sched_setattr(struct task_struct *, const struct sched_attr *);
794a56eb 1827extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
36c8b586 1828extern struct task_struct *idle_task(int cpu);
5eca1c10 1829
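/*
 * Illustrative sketch (not part of this header): in-kernel users are expected
 * to request RT priority via sched_set_fifo()/sched_set_fifo_low() rather
 * than picking a numeric priority themselves, and can later return the task
 * to CFS with sched_set_normal(). The function name is made up for the example.
 */
static inline void example_boost_then_unboost(struct task_struct *worker)
{
	sched_set_fifo(worker);		/* run ahead of all SCHED_NORMAL tasks */
	/* ... latency-critical phase ... */
	sched_set_normal(worker, 0);	/* back to SCHED_NORMAL at nice 0 */
}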
1830/**
1831 * is_idle_task - is the specified task an idle task?
fa757281 1832 * @p: the task in question.
1833 *
1834 * Return: 1 if @p is an idle task. 0 otherwise.
c4f30608 1835 */
c94a88f3 1836static __always_inline bool is_idle_task(const struct task_struct *p)
c4f30608 1837{
c1de45ca 1838 return !!(p->flags & PF_IDLE);
c4f30608 1839}
5eca1c10 1840
36c8b586 1841extern struct task_struct *curr_task(int cpu);
a458ae2e 1842extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1843
1844void yield(void);
1845
1da177e4 1846union thread_union {
0500871f 1847 struct task_struct task;
c65eacbe 1848#ifndef CONFIG_THREAD_INFO_IN_TASK
1da177e4 1849 struct thread_info thread_info;
c65eacbe 1850#endif
1851 unsigned long stack[THREAD_SIZE/sizeof(long)];
1852};
1853
1854#ifndef CONFIG_THREAD_INFO_IN_TASK
1855extern struct thread_info init_thread_info;
1856#endif
1857
1858extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1859
f3ac6067 1860#ifdef CONFIG_THREAD_INFO_IN_TASK
bcf9033e 1861# define task_thread_info(task) (&(task)->thread_info)
1862#elif !defined(__HAVE_THREAD_FUNCTIONS)
1863# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1864#endif
1865
1866/*
1867 * find a task by one of its numerical ids
1868 *
1869 * find_task_by_pid_ns():
1870 * finds a task by its pid in the specified namespace
1871 * find_task_by_vpid():
1872 * finds a task by its virtual pid
198fe21b 1873 *
e49859e7 1874 * see also find_vpid() etc in include/linux/pid.h
1875 */
1876
228ebcbe 1877extern struct task_struct *find_task_by_vpid(pid_t nr);
5eca1c10 1878extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
198fe21b 1879
1880/*
1881 * find a task by its virtual pid and get the task struct
1882 */
1883extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1884
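/*
 * Illustrative sketch (not part of this header): find_task_by_vpid() returns
 * an unreferenced pointer that is only stable while rcu_read_lock() is held;
 * find_get_task_by_vpid() does the same lookup but also takes a reference.
 * The function name is made up for the example.
 */
static inline bool example_pid_is_kthread(pid_t nr)
{
	struct task_struct *p;
	bool ret = false;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		ret = !!(p->flags & PF_KTHREAD);
	rcu_read_unlock();

	return ret;
}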
1885extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1886extern int wake_up_process(struct task_struct *tsk);
3e51e3ed 1887extern void wake_up_new_task(struct task_struct *tsk);
5eca1c10 1888
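/*
 * Illustrative sketch (not part of this header): the classic sleep/wake
 * pairing these wakeup helpers serve. The sleeper sets its state *before*
 * checking the condition so that a concurrent wake_up_process() cannot be
 * lost. 'example_cond' is a hypothetical condition flag.
 */
static inline void example_wait_for(bool *example_cond)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(*example_cond))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
/* The waker side does: WRITE_ONCE(*example_cond, true); wake_up_process(sleeper); */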
1da177e4 1889#ifdef CONFIG_SMP
5eca1c10 1890extern void kick_process(struct task_struct *tsk);
1da177e4 1891#else
5eca1c10 1892static inline void kick_process(struct task_struct *tsk) { }
1da177e4 1893#endif
1da177e4 1894
82b89778 1895extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
5eca1c10 1896
1897static inline void set_task_comm(struct task_struct *tsk, const char *from)
1898{
1899 __set_task_comm(tsk, from, false);
1900}
5eca1c10 1901
1902extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1903#define get_task_comm(buf, tsk) ({ \
1904 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1905 __get_task_comm(buf, sizeof(buf), tsk); \
1906})
1907
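/*
 * Illustrative sketch (not part of this header): get_task_comm() insists on a
 * TASK_COMM_LEN-sized array (the BUILD_BUG_ON above rejects plain pointers)
 * and copies the name under the task lock, NUL-terminating the result.
 * The function name is made up for the example.
 */
static inline void example_log_comm(struct task_struct *p)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, p);
	pr_info("pid %d runs '%s'\n", p->pid, comm);
}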
1908#ifdef CONFIG_SMP
1909static __always_inline void scheduler_ipi(void)
1910{
1911 /*
1912 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1913 * TIF_NEED_RESCHED remotely (for the first time) will also send
1914 * this IPI.
1915 */
1916 preempt_fold_need_resched();
1917}
1da177e4 1918#else
184748cc 1919static inline void scheduler_ipi(void) { }
1920#endif
1921
1922extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
1923
 1924/*
 1925 * Set thread flags in another task's structures.
 1926 * See asm/thread_info.h for the available TIF_xxxx flags:
 1927 */
1928static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1929{
a1261f54 1930 set_ti_thread_flag(task_thread_info(tsk), flag);
1931}
1932
1933static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1934{
a1261f54 1935 clear_ti_thread_flag(task_thread_info(tsk), flag);
1936}
1937
1938static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1939 bool value)
1940{
1941 update_ti_thread_flag(task_thread_info(tsk), flag, value);
1942}
1943
1944static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1945{
a1261f54 1946 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1947}
1948
1949static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1950{
a1261f54 1951 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1952}
1953
1954static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1955{
a1261f54 1956 return test_ti_thread_flag(task_thread_info(tsk), flag);
1957}
1958
1959static inline void set_tsk_need_resched(struct task_struct *tsk)
1960{
1961 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1962}
1963
1964static inline void clear_tsk_need_resched(struct task_struct *tsk)
1965{
1966 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1967}
1968
1969static inline int test_tsk_need_resched(struct task_struct *tsk)
1970{
1971 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1972}
1973
 1974/*
 1975 * cond_resched() and cond_resched_lock(): latency reduction via
 1976 * explicit rescheduling in places that are safe. The return
 1977 * value indicates whether a reschedule was actually done.
 1978 * cond_resched_lock() will drop the spinlock before scheduling.
 1979 */
1980#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
1981extern int __cond_resched(void);
1982
99cf983c 1983#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
b965f1dd 1984
1985void sched_dynamic_klp_enable(void);
1986void sched_dynamic_klp_disable(void);
1987
1988DECLARE_STATIC_CALL(cond_resched, __cond_resched);
1989
1990static __always_inline int _cond_resched(void)
1991{
ef72661e 1992 return static_call_mod(cond_resched)();
1993}
1994
99cf983c 1995#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
e3ff7c60 1996
1997extern int dynamic_cond_resched(void);
1998
1999static __always_inline int _cond_resched(void)
2000{
2001 return dynamic_cond_resched();
2002}
2003
e3ff7c60 2004#else /* !CONFIG_PREEMPTION */
2005
2006static inline int _cond_resched(void)
2007{
e3ff7c60 2008 klp_sched_try_switch();
2009 return __cond_resched();
2010}
2011
e3ff7c60 2012#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
b965f1dd 2013
e3ff7c60 2014#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
b965f1dd 2015
2016static inline int _cond_resched(void)
2017{
2018 klp_sched_try_switch();
2019 return 0;
2020}
b965f1dd 2021
e3ff7c60 2022#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
6f80bd98 2023
613afbf8 2024#define cond_resched() ({ \
874f670e 2025 __might_resched(__FILE__, __LINE__, 0); \
2026 _cond_resched(); \
2027})
6f80bd98 2028
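/*
 * Illustrative sketch (not part of this header): a long-running loop inserts
 * cond_resched() so that, on kernels without involuntary preemption, other
 * tasks are not starved. 'example_process_one' is a hypothetical callback.
 */
static inline void example_process_many(void (*example_process_one)(unsigned long),
					unsigned long count)
{
	unsigned long i;

	for (i = 0; i < count; i++) {
		example_process_one(i);
		cond_resched();		/* voluntary preemption point; may schedule */
	}
}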
613afbf8 2029extern int __cond_resched_lock(spinlock_t *lock);
2030extern int __cond_resched_rwlock_read(rwlock_t *lock);
2031extern int __cond_resched_rwlock_write(rwlock_t *lock);
613afbf8 2032
2033#define MIGHT_RESCHED_RCU_SHIFT 8
2034#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2035
2036#ifndef CONFIG_PREEMPT_RT
 2037/*
 2038 * Non-RT kernels have an elevated preempt count due to the held lock,
 2039 * but are not allowed to be inside an RCU read side critical section.
 2040 */
2041# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
2042#else
2043/*
2044 * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
2045 * cond_resched*lock() has to take that into account because it checks for
2046 * preempt_count() and rcu_preempt_depth().
2047 */
2048# define PREEMPT_LOCK_RESCHED_OFFSETS \
2049 (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2050#endif
2051
2052#define cond_resched_lock(lock) ({ \
2053 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2054 __cond_resched_lock(lock); \
2055})
2056
2057#define cond_resched_rwlock_read(lock) ({ \
2058 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2059 __cond_resched_rwlock_read(lock); \
2060})
2061
2062#define cond_resched_rwlock_write(lock) ({ \
2063 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2064 __cond_resched_rwlock_write(lock); \
2065})
2066
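/*
 * Illustrative sketch (not part of this header): a preemption point inside a
 * spinlock-protected scan. cond_resched_lock() unlocks, may schedule, and
 * relocks; a nonzero return means the lock was dropped, so anything derived
 * under it must be revalidated. The example_* names are made up.
 */
static inline void example_long_scan(spinlock_t *example_lock, unsigned long iterations)
{
	unsigned long i;

	spin_lock(example_lock);
	for (i = 0; i < iterations; i++) {
		/* ... one unit of work under example_lock ... */

		/* Preemption point; the return value is ignored in this sketch. */
		cond_resched_lock(example_lock);
	}
	spin_unlock(example_lock);
}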
2067#ifdef CONFIG_PREEMPT_DYNAMIC
2068
2069extern bool preempt_model_none(void);
2070extern bool preempt_model_voluntary(void);
2071extern bool preempt_model_full(void);
2072
2073#else
2074
2075static inline bool preempt_model_none(void)
2076{
2077 return IS_ENABLED(CONFIG_PREEMPT_NONE);
2078}
2079static inline bool preempt_model_voluntary(void)
2080{
2081 return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2082}
2083static inline bool preempt_model_full(void)
2084{
2085 return IS_ENABLED(CONFIG_PREEMPT);
2086}
2087
2088#endif
2089
2090static inline bool preempt_model_rt(void)
2091{
2092 return IS_ENABLED(CONFIG_PREEMPT_RT);
2093}
2094
2095/*
2096 * Does the preemption model allow non-cooperative preemption?
2097 *
2098 * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
2099 * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
2100 * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
2101 * PREEMPT_NONE model.
2102 */
2103static inline bool preempt_model_preemptible(void)
2104{
2105 return preempt_model_full() || preempt_model_rt();
2106}
2107
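/*
 * Illustrative sketch (not part of this header, and only one possible use):
 * code that relies on involuntary preemption for latency can fall back to an
 * explicit rescheduling point when preempt_model_preemptible() reports a
 * cooperative model, including PREEMPT_DYNAMIC kernels booted as
 * none/voluntary. The function name is made up for the example.
 */
static inline void example_latency_point(void)
{
	if (!preempt_model_preemptible())
		cond_resched();		/* cooperative models need explicit points */
}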
2108static __always_inline bool need_resched(void)
2109{
2110 return unlikely(tif_need_resched());
2111}
2112
2113/*
2114 * Wrappers for p->thread_info->cpu access. No-op on UP.
2115 */
2116#ifdef CONFIG_SMP
2117
2118static inline unsigned int task_cpu(const struct task_struct *p)
2119{
c546951d 2120 return READ_ONCE(task_thread_info(p)->cpu);
2121}
2122
c65cc870 2123extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2124
2125#else
2126
2127static inline unsigned int task_cpu(const struct task_struct *p)
2128{
2129 return 0;
2130}
2131
2132static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2133{
2134}
2135
2136#endif /* CONFIG_SMP */
2137
a1dfb631 2138extern bool sched_task_on_rq(struct task_struct *p);
42a20f86 2139extern unsigned long get_wchan(struct task_struct *p);
e386b672 2140extern struct task_struct *cpu_curr_snapshot(int cpu);
a1dfb631 2141
2142#include <linux/spinlock.h>
2143
 2144/*
 2145 * In order to reduce various lock-holder preemption latencies, provide an
 2146 * interface to see whether a vCPU is currently running or not.
 2147 *
 2148 * This allows us to terminate optimistic spin loops and block, analogous to
 2149 * the native optimistic spin heuristic of testing if the lock owner task is
 2150 * running or not.
 2151 */
2152#ifndef vcpu_is_preempted
2153static inline bool vcpu_is_preempted(int cpu)
2154{
2155 return false;
2156}
2157#endif
2158
2159extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2160extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
5c45bf27 2161
2162#ifndef TASK_SIZE_OF
2163#define TASK_SIZE_OF(tsk) TASK_SIZE
2164#endif
2165
a5418be9 2166#ifdef CONFIG_SMP
2167static inline bool owner_on_cpu(struct task_struct *owner)
2168{
 2169	/*
 2170	 * Because of the lock-holder preemption issue, skip spinning if the
 2171	 * owner task is not running on a CPU or its (v)CPU has been preempted.
 2172	 */
4cf75fd4 2173 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2174}
2175
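/*
 * Illustrative sketch (not part of this header): the optimistic-spin pattern
 * owner_on_cpu() supports. Spin only while the lock owner is actually running;
 * once it sleeps or its (v)CPU is preempted, give up and block instead.
 * 'example_trylock' and 'example_owner' are hypothetical callbacks.
 */
static inline bool example_spin_on_owner(bool (*example_trylock)(void),
					 struct task_struct *(*example_owner)(void))
{
	struct task_struct *owner;

	while (!example_trylock()) {
		owner = example_owner();
		if (!owner || !owner_on_cpu(owner))
			return false;	/* stop spinning; caller should block */
		cpu_relax();
	}
	return true;			/* lock acquired while spinning */
}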
a5418be9 2176/* Returns effective CPU energy utilization, as seen by the scheduler */
bb447999 2177unsigned long sched_cpu_util(int cpu);
2178#endif /* CONFIG_SMP */
2179
2180#ifdef CONFIG_SCHED_CORE
2181extern void sched_core_free(struct task_struct *tsk);
85dd3f61 2182extern void sched_core_fork(struct task_struct *p);
2183extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2184 unsigned long uaddr);
548796e2 2185extern int sched_core_idle_cpu(int cpu);
2186#else
2187static inline void sched_core_free(struct task_struct *tsk) { }
85dd3f61 2188static inline void sched_core_fork(struct task_struct *p) { }
548796e2 2189static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
2190#endif
2191
2192extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2193
2194#ifdef CONFIG_MEM_ALLOC_PROFILING
2195static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
2196{
2197 swap(current->alloc_tag, tag);
2198 return tag;
2199}
2200
2201static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
2202{
2203#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
2204 WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
2205#endif
2206 current->alloc_tag = old;
2207}
2208#else
2209#define alloc_tag_save(_tag) NULL
2210#define alloc_tag_restore(_tag, _old) do {} while (0)
2211#endif
2212
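/*
 * Illustrative sketch (not part of this header): attribute a block of
 * allocations to a caller-supplied tag, the pattern the alloc_hooks()
 * machinery is built on. With CONFIG_MEM_ALLOC_PROFILING=n both helpers
 * above compile away. 'example_tag' and the function name are made up, and
 * the kmalloc() call assumes <linux/slab.h> is available to the caller.
 */
static inline void *example_tagged_alloc(struct alloc_tag *example_tag, size_t size)
{
	struct alloc_tag *old = alloc_tag_save(example_tag);	/* redirect accounting */
	void *p = kmalloc(size, GFP_KERNEL);

	alloc_tag_restore(example_tag, old);			/* put the old tag back */
	return p;
}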
1da177e4 2213#endif