#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt_mask.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param cannot be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will meet their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * is available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};

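/*
 * For illustration only: a minimal userspace sketch (not kernel code) of
 * filling in a sched_attr to request SCHED_DEADLINE, assuming a
 * sched_setattr() syscall wrapper is available:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	(10ms, in ns)
 *		.sched_deadline	=  30 * 1000 * 1000,	(30ms, in ns)
 *		.sched_period	= 100 * 1000 * 1000,	(100ms, in ns)
 *	};
 *	sched_setattr(0, &attr, 0);
 *
 * All three times are in nanoseconds, and sched_runtime <= sched_deadline
 * <= sched_period must hold for the parameters to be accepted.
 */
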
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

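/*
 * Worked example of CALC_LOAD: with FIXED_1 = 2048 and EXP_1 = 1884,
 * every LOAD_FREQ interval computes
 *
 *	load = (load*1884 + n*(2048 - 1884)) >> 11
 *
 * where n is the number of runnable tasks scaled by FIXED_1.  Starting
 * from load = 2048 (i.e. 1.0) with n = 0, one interval leaves
 * (2048*1884) >> 11 = 1884, i.e. the 1-minute average decays to ~0.92.
 */
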
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

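/*
 * Informal check of the assertion above: TASK_STATE_MAX == 1024 gives
 * ilog2(1024) + 1 == 11 distinct reported states, and "RSDTtXZxKWP" is
 * exactly 11 characters, one per state from 'R' (TASK_RUNNING) through
 * 'P' (TASK_PARKED).  If the two ever disagree, the array above gets a
 * negative size and the build fails.
 */
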
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task) \
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use
 * __set_current_state().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))

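/*
 * The waker pairs with that barrier; a minimal sketch (CONDITION and wq
 * are illustrative names, not kernel symbols):
 *
 *	CONDITION = 1;
 *	wake_up(&wq);
 *
 * Writing the condition before issuing the wakeup ensures the sleeper,
 * whose state write is ordered before its condition test, either sees
 * the condition or is woken after having set its state.
 */
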
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(int pinned);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
static inline int get_nohz_timer_target(int pinned)
{
	return smp_processor_id();
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

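/*
 * Worked example: with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y the
 * default filter covers bits 2, 3, 6 and 7, i.e.
 * (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) == 0xcc, while
 * MMF_DUMP_FILTER_MASK is ((1 << 7) - 1) << 2 == 0x1fc (bits 2-8).
 */
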
struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 *			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	unsigned audit_tty_log_passwd;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;		/* # of times run on this cpu */
	unsigned long long run_delay;	/* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

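/*
 * In other words, cpu_power is a fixed-point value with SCHED_POWER_SHIFT
 * fractional bits: a CPU of nominal capacity is SCHED_POWER_SCALE (1024),
 * and, for example, a CPU worth half that capacity would be expressed
 * as 512.
 */
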
/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are bound
	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
	 * choices of y < 1-2^(-32)*1024.
	 */
	u32 runnable_avg_sum, runnable_avg_period;
	u64 last_runnable_update;
	s64 decay_count;
	unsigned long load_avg_contrib;
};

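/*
 * Worked bound for the comment above: the series sums to at most
 * 1024/(1-y), which fits a u32 whenever 1024/(1-y) < 2^32, i.e. for all
 * y < 1 - 2^(-32)*1024.  With the decay actually used by the fair class
 * (y^32 = 0.5, see kernel/sched/fair.c), the sum stays below ~47742,
 * far inside that bound.
 */
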
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int			depth;
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/* Per-entity load-tracking */
	struct sched_avg	avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

1120
aab03e05
DF
1121struct sched_dl_entity {
1122 struct rb_node rb_node;
1123
1124 /*
1125 * Original scheduling parameters. Copied here from sched_attr
1126 * during sched_setscheduler2(), they will remain the same until
1127 * the next sched_setscheduler2().
1128 */
1129 u64 dl_runtime; /* maximum runtime for each instance */
1130 u64 dl_deadline; /* relative deadline of each instance */
755378a4 1131 u64 dl_period; /* separation of two instances (period) */
332ac17e 1132 u64 dl_bw; /* dl_runtime / dl_deadline */
aab03e05
DF
1133
1134 /*
1135 * Actual scheduling parameters. Initialized with the values above,
1136 * they are continously updated during task execution. Note that
1137 * the remaining runtime could be < 0 in case we are in overrun.
1138 */
1139 s64 runtime; /* remaining runtime for this instance */
1140 u64 deadline; /* absolute deadline for this instance */
1141 unsigned int flags; /* specifying the scheduler behaviour */
1142
1143 /*
1144 * Some bool flags:
1145 *
1146 * @dl_throttled tells if we exhausted the runtime. If so, the
1147 * task has to wait for a replenishment to be performed at the
1148 * next firing of dl_timer.
1149 *
1150 * @dl_new tells if a new instance arrived. If so we must
1151 * start executing it with full runtime and reset its absolute
1152 * deadline;
2d3d891d
DF
1153 *
1154 * @dl_boosted tells if we are boosted due to DI. If so we are
1155 * outside bandwidth enforcement mechanism (but only until we
5bfd126e
JL
1156 * exit the critical section);
1157 *
1158 * @dl_yielded tells if task gave up the cpu before consuming
1159 * all its available runtime during the last job.
aab03e05 1160 */
5bfd126e 1161 int dl_throttled, dl_new, dl_boosted, dl_yielded;
aab03e05
DF
1162
1163 /*
1164 * Bandwidth enforcement timer. Each -deadline task has its
1165 * own bandwidth to be enforced, thus we need one timer per task.
1166 */
1167 struct hrtimer dl_timer;
1168};
8bd75c77 1169
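/*
 * For instance, dl_runtime = 10ms against dl_deadline = 30ms yields
 * dl_bw = 10/30, i.e. roughly a third of a CPU, kept as a fixed-point
 * fraction.
 */
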
struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
	struct task_struct *last_wakee;
	unsigned long wakee_flips;
	unsigned long wakee_flip_decay_ts;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
	/* per-thread vma caching */
	u32 vmacache_seqnum;
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;

	/* task may not gain privileges */
	unsigned no_new_privs:1;

	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_t vtime_seqlock;
	unsigned long long vtime_snap;
	enum {
		VTIME_SLEEPING = 0,
		VTIME_USER,
		VTIME_SYS,
	} vtime_snap_whence;
#endif
	unsigned long nvcsw, nivcsw;	/* context switch counts */
	struct timespec start_time;		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
	/* Top pi_waiters task */
	struct task_struct *pi_top_task;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no. to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * Exponential decaying average of faults on a per-node basis.
	 * Scheduling placement decisions are made based on these counts.
	 * The values remain static for the duration of a PTE scan.
	 */
	unsigned long *numa_faults_memory;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_buffer records faults per node during the current
	 * scan window. When the scan completes, the counts in
	 * numa_faults_memory decay and these values are copied.
	 */
	unsigned long *numa_faults_buffer_memory;

	/*
	 * Track the nodes the process was running on when a NUMA hinting
	 * fault was incurred.
	 */
	unsigned long *numa_faults_cpu;
	unsigned long *numa_faults_buffer_cpu;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local. The task scan period is adapted
	 * based on the locality of the faults with different weights
	 * depending on whether they were shared or private faults.
	 */
	unsigned long numa_faults_locality[2];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

e56d0903 1536 struct rcu_head rcu;
b92ce558
JA
1537
1538 /*
1539 * cache last used pipe for splice
1540 */
1541 struct pipe_inode_info *splice_pipe;
5640f768
ED
1542
1543 struct page_frag task_frag;
1544
ca74e92b
SN
1545#ifdef CONFIG_TASK_DELAY_ACCT
1546 struct task_delay_info *delays;
f4f154fd
AM
1547#endif
1548#ifdef CONFIG_FAULT_INJECTION
1549 int make_it_fail;
ca74e92b 1550#endif
9d823e8f
WF
1551 /*
1552 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1553 * balance_dirty_pages() for some dirty throttling pause
1554 */
1555 int nr_dirtied;
1556 int nr_dirtied_pause;
83712358 1557 unsigned long dirty_paused_when; /* start of a write-and-pause period */
9d823e8f 1558
9745512c
AV
1559#ifdef CONFIG_LATENCYTOP
1560 int latency_record_count;
1561 struct latency_record latency_record[LT_SAVECOUNT];
1562#endif
6976675d
AV
1563 /*
1564 * time slack values; these are used to round up poll() and
1565 * select() etc timeout values. These are in nanoseconds.
1566 */
1567 unsigned long timer_slack_ns;
1568 unsigned long default_timer_slack_ns;
f8d570a4 1569
fb52607a 1570#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3ad2f3fb 1571 /* Index of current stored address in ret_stack */
f201ae23
FW
1572 int curr_ret_stack;
1573 /* Stack of return addresses for return function tracing */
1574 struct ftrace_ret_stack *ret_stack;
8aef2d28
SR
1575 /* time stamp for last schedule */
1576 unsigned long long ftrace_timestamp;
f201ae23
FW
1577 /*
1578 * Number of functions that haven't been traced
1579 * because of depth overrun.
1580 */
1581 atomic_t trace_overrun;
380c4b14
FW
1582 /* Pause for the tracing */
1583 atomic_t tracing_graph_pause;
f201ae23 1584#endif
ea4e2bc4
SR
1585#ifdef CONFIG_TRACING
1586 /* state flags for use by tracers */
1587 unsigned long trace;
b1cff0ad 1588 /* bitmask and counter of trace recursion */
261842b7
SR
1589 unsigned long trace_recursion;
1590#endif /* CONFIG_TRACING */
c255a458 1591#ifdef CONFIG_MEMCG /* memcg uses this for batched (un)charging */
569b846d
KH
1592 struct memcg_batch_info {
1593 int do_batch; /* incremented when batch uncharge started */
1594 struct mem_cgroup *memcg; /* target memcg of uncharge */
7ffd4ca7
JW
1595 unsigned long nr_pages; /* uncharged usage */
1596 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
569b846d 1597 } memcg_batch;
0e9d92f2 1598 unsigned int memcg_kmem_skip_account;
519e5247 1599 struct memcg_oom_info {
49426420
JW
1600 struct mem_cgroup *memcg;
1601 gfp_t gfp_mask;
1602 int order;
519e5247
JW
1603 unsigned int may_oom:1;
1604 } memcg_oom;
569b846d 1605#endif
0326f5a9
SD
1606#ifdef CONFIG_UPROBES
1607 struct uprobe_task *utask;
0326f5a9 1608#endif
cafe5635
KO
1609#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1610 unsigned int sequential_io;
1611 unsigned int sequential_io_avg;
1612#endif
1da177e4
LT
1613};
1614
76e6eee0 1615/* Future-safe accessor for struct task_struct's cpus_allowed. */
a4636818 1616#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
76e6eee0 1617
6688cc05
PZ
1618#define TNF_MIGRATED 0x01
1619#define TNF_NO_GROUP 0x02
dabe1d99 1620#define TNF_SHARED 0x04
04bb2f94 1621#define TNF_FAULT_LOCAL 0x08
6688cc05 1622
cbee9f88 1623#ifdef CONFIG_NUMA_BALANCING
6688cc05 1624extern void task_numa_fault(int last_node, int node, int pages, int flags);
e29cf08b 1625extern pid_t task_numa_group_id(struct task_struct *p);
1a687c2e 1626extern void set_numabalancing_state(bool enabled);
82727018 1627extern void task_numa_free(struct task_struct *p);
10f39042
RR
1628extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1629 int src_nid, int dst_cpu);
cbee9f88 1630#else
ac8e895b 1631static inline void task_numa_fault(int last_node, int node, int pages,
6688cc05 1632 int flags)
cbee9f88
PZ
1633{
1634}
e29cf08b
MG
1635static inline pid_t task_numa_group_id(struct task_struct *p)
1636{
1637 return 0;
1638}
1a687c2e
MG
1639static inline void set_numabalancing_state(bool enabled)
1640{
1641}
82727018
RR
1642static inline void task_numa_free(struct task_struct *p)
1643{
1644}
10f39042
RR
1645static inline bool should_numa_migrate_memory(struct task_struct *p,
1646 struct page *page, int src_nid, int dst_cpu)
1647{
1648 return true;
1649}
cbee9f88
PZ
1650#endif
1651
e868171a 1652static inline struct pid *task_pid(struct task_struct *task)
22c935f4
EB
1653{
1654 return task->pids[PIDTYPE_PID].pid;
1655}
1656
e868171a 1657static inline struct pid *task_tgid(struct task_struct *task)
22c935f4
EB
1658{
1659 return task->group_leader->pids[PIDTYPE_PID].pid;
1660}
1661
6dda81f4
ON
1662/*
1663 * Without tasklist or rcu lock it is not safe to dereference
1664 * the result of task_pgrp/task_session even if task == current;
1665 * we can race with another thread doing sys_setsid/sys_setpgid.
1666 */
e868171a 1667static inline struct pid *task_pgrp(struct task_struct *task)
22c935f4
EB
1668{
1669 return task->group_leader->pids[PIDTYPE_PGID].pid;
1670}
1671
e868171a 1672static inline struct pid *task_session(struct task_struct *task)
22c935f4
EB
1673{
1674 return task->group_leader->pids[PIDTYPE_SID].pid;
1675}
1676
7af57294
PE
1677struct pid_namespace;
1678
1679/*
1680 * the helpers to get the task's different pids as they are seen
1681 * from various namespaces
1682 *
1683 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
44c4e1b2
EB
1684 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1685 * current.
7af57294
PE
1686 * task_xid_nr_ns() : id seen from the ns specified;
1687 *
1688 * set_task_vxid() : assigns a virtual id to a task;
1689 *
7af57294
PE
1690 * see also pid_nr() etc in include/linux/pid.h
1691 */
52ee2dfd
ON
1692pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1693 struct pid_namespace *ns);
7af57294 1694
e868171a 1695static inline pid_t task_pid_nr(struct task_struct *tsk)
7af57294
PE
1696{
1697 return tsk->pid;
1698}
1699
52ee2dfd
ON
1700static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1701 struct pid_namespace *ns)
1702{
1703 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1704}
7af57294
PE
1705
1706static inline pid_t task_pid_vnr(struct task_struct *tsk)
1707{
52ee2dfd 1708 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
7af57294
PE
1709}
1710
1711
e868171a 1712static inline pid_t task_tgid_nr(struct task_struct *tsk)
7af57294
PE
1713{
1714 return tsk->tgid;
1715}
1716
2f2a3a46 1717pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
7af57294
PE
1718
1719static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1720{
1721 return pid_vnr(task_tgid(tsk));
1722}
1723
1724
80e0b6e8 1725static inline int pid_alive(const struct task_struct *p);
ad36d282
RGB
1726static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1727{
1728 pid_t pid = 0;
1729
1730 rcu_read_lock();
1731 if (pid_alive(tsk))
1732 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1733 rcu_read_unlock();
1734
1735 return pid;
1736}
1737
1738static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1739{
1740 return task_ppid_nr_ns(tsk, &init_pid_ns);
1741}
1742
52ee2dfd
ON
1743static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1744 struct pid_namespace *ns)
7af57294 1745{
52ee2dfd 1746 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
7af57294
PE
1747}
1748
7af57294
PE
1749static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1750{
52ee2dfd 1751 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
7af57294
PE
1752}
1753
1754
52ee2dfd
ON
1755static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1756 struct pid_namespace *ns)
7af57294 1757{
52ee2dfd 1758 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
7af57294
PE
1759}
1760
7af57294
PE
1761static inline pid_t task_session_vnr(struct task_struct *tsk)
1762{
52ee2dfd 1763 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
7af57294
PE
1764}
1765
1b0f7ffd
ON
1766/* obsolete, do not use */
1767static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1768{
1769 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1770}
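
/*
 * Illustrative usage sketch (not part of the original header): report a
 * task's id globally, as seen from current's pid namespace, and as seen
 * from the task's own namespace. task_active_pid_ns() is assumed to come
 * from <linux/pid_namespace.h>.
 */
static inline void example_report_pids(struct task_struct *tsk)
{
	pid_t gnr = task_pid_nr(tsk);		/* id in init_pid_ns */
	pid_t vnr = task_pid_vnr(tsk);		/* id in current's pid ns */
	pid_t own = task_pid_nr_ns(tsk, task_active_pid_ns(tsk));

	printk(KERN_INFO "pid: global=%d virtual=%d own-ns=%d\n",
	       gnr, vnr, own);
}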
7af57294 1771
1da177e4
LT
1772/**
1773 * pid_alive - check that a task structure is not stale
1774 * @p: Task structure to be checked.
1775 *
1776 * Test if a process is not yet dead (at most zombie state).
1777 * If pid_alive fails, then pointers within the task structure
1778 * can be stale and must not be dereferenced.
e69f6186
YB
1779 *
1780 * Return: 1 if the process is alive. 0 otherwise.
1da177e4 1781 */
ad36d282 1782static inline int pid_alive(const struct task_struct *p)
1da177e4 1783{
92476d7f 1784 return p->pids[PIDTYPE_PID].pid != NULL;
1da177e4
LT
1785}
1786
f400e198 1787/**
b460cbc5 1788 * is_global_init - check if a task structure is init
3260259f
H
1789 * @tsk: Task structure to be checked.
1790 *
1791 * Check if a task structure is the first user space task the kernel created.
e69f6186
YB
1792 *
1793 * Return: 1 if the task structure is init. 0 otherwise.
b460cbc5 1794 */
e868171a 1795static inline int is_global_init(struct task_struct *tsk)
b461cc03
PE
1796{
1797 return tsk->pid == 1;
1798}
b460cbc5 1799
9ec52099
CLG
1800extern struct pid *cad_pid;
1801
1da177e4 1802extern void free_task(struct task_struct *tsk);
1da177e4 1803#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
e56d0903 1804
158d9ebd 1805extern void __put_task_struct(struct task_struct *t);
e56d0903
IM
1806
1807static inline void put_task_struct(struct task_struct *t)
1808{
1809 if (atomic_dec_and_test(&t->usage))
8c7904a0 1810 __put_task_struct(t);
e56d0903 1811}
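
/*
 * Illustrative usage sketch (not part of the original header): pin a task
 * found under RCU with get_task_struct() so it remains valid after the
 * read-side section ends. find_task_by_vpid() is declared further down in
 * this header; the caller must balance with put_task_struct().
 */
static inline struct task_struct *example_get_task(pid_t vnr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(vnr);
	if (p)
		get_task_struct(p);	/* take a usage reference */
	rcu_read_unlock();

	return p;			/* may be NULL */
}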
1da177e4 1812
6a61671b
FW
1813#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1814extern void task_cputime(struct task_struct *t,
1815 cputime_t *utime, cputime_t *stime);
1816extern void task_cputime_scaled(struct task_struct *t,
1817 cputime_t *utimescaled, cputime_t *stimescaled);
1818extern cputime_t task_gtime(struct task_struct *t);
1819#else
6fac4829
FW
1820static inline void task_cputime(struct task_struct *t,
1821 cputime_t *utime, cputime_t *stime)
1822{
1823 if (utime)
1824 *utime = t->utime;
1825 if (stime)
1826 *stime = t->stime;
1827}
1828
1829static inline void task_cputime_scaled(struct task_struct *t,
1830 cputime_t *utimescaled,
1831 cputime_t *stimescaled)
1832{
1833 if (utimescaled)
1834 *utimescaled = t->utimescaled;
1835 if (stimescaled)
1836 *stimescaled = t->stimescaled;
1837}
6a61671b
FW
1838
1839static inline cputime_t task_gtime(struct task_struct *t)
1840{
1841 return t->gtime;
1842}
1843#endif
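
/*
 * Illustrative usage sketch (not part of the original header): total up a
 * task's user and system time via the accessor above, which works the
 * same whether or not CONFIG_VIRT_CPU_ACCOUNTING_GEN is enabled.
 */
static inline cputime_t example_total_cputime(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	return utime + stime;
}
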
e80d0a1a
FW
1844extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1845extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
49048622 1846
1da177e4
LT
1847/*
1848 * Per process flags
1849 */
1da177e4 1850#define PF_EXITING 0x00000004 /* getting shut down */
778e9a9c 1851#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
94886b84 1852#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
21aa9af0 1853#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1da177e4 1854#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
4db96cf0 1855#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1da177e4
LT
1856#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1857#define PF_DUMPCORE 0x00000200 /* dumped core */
1858#define PF_SIGNALED 0x00000400 /* killed by a signal */
1859#define PF_MEMALLOC 0x00000800 /* Allocating memory */
72fa5997 1860#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1da177e4 1861#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
774a1221 1862#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
1da177e4
LT
1863#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1864#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1865#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1866#define PF_KSWAPD 0x00040000 /* I am kswapd */
21caf2fc 1867#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
1da177e4 1868#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
246bb0b1 1869#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
b31dc66a
JA
1870#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1871#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1872#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
1873#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
14a40ffc 1874#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
4db96cf0 1875#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
61a87122 1876#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
58a69cb4 1877#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
2b44c4db 1878#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
1da177e4
LT
1879
1880/*
1881 * Only the _current_ task can read/write to tsk->flags, but other
1882 * tasks can access tsk->flags in readonly mode, for example
1883 * with tsk_used_math (like during threaded core dumping).
1884 * There is however an exception to this rule during ptrace
1885 * or during fork: the ptracer task is allowed to write to the
1886 * child->flags of its traced child (same goes for fork, the parent
1887 * can write to the child->flags), because we're guaranteed the
1888 * child is not running and in turn not changing child->flags
1889 * at the same time the parent does it.
1890 */
1891#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1892#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1893#define clear_used_math() clear_stopped_child_used_math(current)
1894#define set_used_math() set_stopped_child_used_math(current)
1895#define conditional_stopped_child_used_math(condition, child) \
1896 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1897#define conditional_used_math(condition) \
1898 conditional_stopped_child_used_math(condition, current)
1899#define copy_to_stopped_child_used_math(child) \
1900 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1901/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1902#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1903#define used_math() tsk_used_math(current)
1904
21caf2fc
ML
1905/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
1906static inline gfp_t memalloc_noio_flags(gfp_t flags)
1907{
1908 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1909 flags &= ~__GFP_IO;
1910 return flags;
1911}
1912
1913static inline unsigned int memalloc_noio_save(void)
1914{
1915 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1916 current->flags |= PF_MEMALLOC_NOIO;
1917 return flags;
1918}
1919
1920static inline void memalloc_noio_restore(unsigned int flags)
1921{
1922 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
1923}
1924
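/*
 * Illustrative usage sketch (not part of the original header): bracket an
 * allocation with memalloc_noio_save()/memalloc_noio_restore() so that
 * __GFP_IO is stripped and the allocator cannot recurse into I/O.
 * kmalloc() is assumed from <linux/slab.h>.
 */
static inline void *example_noio_alloc(size_t size)
{
	unsigned int noio_flags = memalloc_noio_save();
	void *p = kmalloc(size, GFP_KERNEL);	/* degraded to ~__GFP_IO */

	memalloc_noio_restore(noio_flags);
	return p;
}
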
e5c1902e 1925/*
a8f072c1 1926 * task->jobctl flags
e5c1902e 1927 */
a8f072c1 1928#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
e5c1902e 1929
a8f072c1
TH
1930#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
1931#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
1932#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
73ddff2b 1933#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
fb1d910c 1934#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
a8f072c1 1935#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
544b2c91 1936#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
a8f072c1
TH
1937
1938#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1939#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1940#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
73ddff2b 1941#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
fb1d910c 1942#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
a8f072c1 1943#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
544b2c91 1944#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
a8f072c1 1945
fb1d910c 1946#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
73ddff2b 1947#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
3759a0d9 1948
7dd3db54
TH
1949extern bool task_set_jobctl_pending(struct task_struct *task,
1950 unsigned int mask);
73ddff2b 1951extern void task_clear_jobctl_trapping(struct task_struct *task);
3759a0d9
TH
1952extern void task_clear_jobctl_pending(struct task_struct *task,
1953 unsigned int mask);
39efa3ef 1954
a57eb940 1955#ifdef CONFIG_PREEMPT_RCU
f41d911f
PM
1956
1957#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1aa03f11 1958#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
f41d911f
PM
1959
1960static inline void rcu_copy_process(struct task_struct *p)
1961{
1962 p->rcu_read_lock_nesting = 0;
1963 p->rcu_read_unlock_special = 0;
a57eb940 1964#ifdef CONFIG_TREE_PREEMPT_RCU
dd5d19ba 1965 p->rcu_blocked_node = NULL;
24278d14
PM
1966#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1967#ifdef CONFIG_RCU_BOOST
1968 p->rcu_boost_mutex = NULL;
1969#endif /* #ifdef CONFIG_RCU_BOOST */
f41d911f
PM
1970 INIT_LIST_HEAD(&p->rcu_node_entry);
1971}
1972
f41d911f
PM
1973#else
1974
1975static inline void rcu_copy_process(struct task_struct *p)
1976{
1977}
1978
1979#endif
1980
907aed48
MG
1981static inline void tsk_restore_flags(struct task_struct *task,
1982 unsigned long orig_flags, unsigned long flags)
1983{
1984 task->flags &= ~flags;
1985 task->flags |= orig_flags & flags;
1986}
1987
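/*
 * Illustrative usage sketch (not part of the original header): temporarily
 * set PF_MEMALLOC and then put the caller's original bit back with
 * tsk_restore_flags(), the pattern used by reclaim-facing code.
 */
static inline void example_with_pf_memalloc(void (*critical)(void))
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;	/* may dip into memory reserves */
	critical();
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}
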
1da177e4 1988#ifdef CONFIG_SMP
1e1b6c51
KM
1989extern void do_set_cpus_allowed(struct task_struct *p,
1990 const struct cpumask *new_mask);
1991
cd8ba7cd 1992extern int set_cpus_allowed_ptr(struct task_struct *p,
96f874e2 1993 const struct cpumask *new_mask);
1da177e4 1994#else
1e1b6c51
KM
1995static inline void do_set_cpus_allowed(struct task_struct *p,
1996 const struct cpumask *new_mask)
1997{
1998}
cd8ba7cd 1999static inline int set_cpus_allowed_ptr(struct task_struct *p,
96f874e2 2000 const struct cpumask *new_mask)
1da177e4 2001{
96f874e2 2002 if (!cpumask_test_cpu(0, new_mask))
1da177e4
LT
2003 return -EINVAL;
2004 return 0;
2005}
2006#endif
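
/*
 * Illustrative usage sketch (not part of the original header): restrict a
 * task to a single CPU. cpumask_of() is assumed from <linux/cpumask.h>;
 * the return value is 0 or a -errno from set_cpus_allowed_ptr().
 */
static inline int example_pin_task(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}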
e0ad9556 2007
3451d024 2008#ifdef CONFIG_NO_HZ_COMMON
5167e8d5
PZ
2009void calc_load_enter_idle(void);
2010void calc_load_exit_idle(void);
2011#else
2012static inline void calc_load_enter_idle(void) { }
2013static inline void calc_load_exit_idle(void) { }
3451d024 2014#endif /* CONFIG_NO_HZ_COMMON */
5167e8d5 2015
e0ad9556 2016#ifndef CONFIG_CPUMASK_OFFSTACK
cd8ba7cd
MT
2017static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2018{
2019 return set_cpus_allowed_ptr(p, &new_mask);
2020}
e0ad9556 2021#endif
1da177e4 2022
b342501c 2023/*
c676329a
PZ
2024 * Do not use outside of architecture code which knows its limitations.
2025 *
2026 * sched_clock() has no promise of monotonicity or bounded drift between
2027 * CPUs, and its use (which you should not attempt) requires disabling IRQs.
2028 *
2029 * Please use one of the three interfaces below.
b342501c 2030 */
1bbfa6f2 2031extern unsigned long long notrace sched_clock(void);
c676329a 2032/*
489a71b0 2033 * See the comment in kernel/sched/clock.c
c676329a
PZ
2034 */
2035extern u64 cpu_clock(int cpu);
2036extern u64 local_clock(void);
2037extern u64 sched_clock_cpu(int cpu);
2038
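
/*
 * Illustrative usage sketch (not part of the original header): time a
 * short section with local_clock(), one of the sanctioned interfaces
 * above. The result is in nanoseconds and only comparable on one CPU.
 */
static inline u64 example_time_section(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();
	return local_clock() - t0;
}
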
e436d800 2039
c1955a3d 2040extern void sched_clock_init(void);
3e51f33f 2041
c1955a3d 2042#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
3e51f33f
PZ
2043static inline void sched_clock_tick(void)
2044{
2045}
2046
2047static inline void sched_clock_idle_sleep_event(void)
2048{
2049}
2050
2051static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2052{
2053}
2054#else
c676329a
PZ
2055/*
2056 * Architectures can set this to 1 if they have specified
2057 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2058 * but then during bootup it turns out that sched_clock()
2059 * is reliable after all:
2060 */
35af99e6
PZ
2061extern int sched_clock_stable(void);
2062extern void set_sched_clock_stable(void);
2063extern void clear_sched_clock_stable(void);
c676329a 2064
3e51f33f
PZ
2065extern void sched_clock_tick(void);
2066extern void sched_clock_idle_sleep_event(void);
2067extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2068#endif
2069
b52bfee4
VP
2070#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2071/*
2072 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
2073 * The reason for this explicit opt-in is not to have perf penalty with
2074 * slow sched_clocks.
2075 */
2076extern void enable_sched_clock_irqtime(void);
2077extern void disable_sched_clock_irqtime(void);
2078#else
2079static inline void enable_sched_clock_irqtime(void) {}
2080static inline void disable_sched_clock_irqtime(void) {}
2081#endif
2082
36c8b586 2083extern unsigned long long
41b86e9c 2084task_sched_runtime(struct task_struct *task);
1da177e4
LT
2085
2086/* sched_exec is called by processes performing an exec */
2087#ifdef CONFIG_SMP
2088extern void sched_exec(void);
2089#else
2090#define sched_exec() {}
2091#endif
2092
2aa44d05
IM
2093extern void sched_clock_idle_sleep_event(void);
2094extern void sched_clock_idle_wakeup_event(u64 delta_ns);
bb29ab26 2095
1da177e4
LT
2096#ifdef CONFIG_HOTPLUG_CPU
2097extern void idle_task_exit(void);
2098#else
2099static inline void idle_task_exit(void) {}
2100#endif
2101
3451d024 2102#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
1c20091e 2103extern void wake_up_nohz_cpu(int cpu);
06d8308c 2104#else
1c20091e 2105static inline void wake_up_nohz_cpu(int cpu) { }
06d8308c
TG
2106#endif
2107
ce831b38
FW
2108#ifdef CONFIG_NO_HZ_FULL
2109extern bool sched_can_stop_tick(void);
265f22a9 2110extern u64 scheduler_tick_max_deferment(void);
ce831b38
FW
2111#else
2112static inline bool sched_can_stop_tick(void) { return false; }
06d8308c
TG
2113#endif
2114
5091faa4 2115#ifdef CONFIG_SCHED_AUTOGROUP
5091faa4
MG
2116extern void sched_autogroup_create_attach(struct task_struct *p);
2117extern void sched_autogroup_detach(struct task_struct *p);
2118extern void sched_autogroup_fork(struct signal_struct *sig);
2119extern void sched_autogroup_exit(struct signal_struct *sig);
2120#ifdef CONFIG_PROC_FS
2121extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2e5b5b3a 2122extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
5091faa4
MG
2123#endif
2124#else
2125static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2126static inline void sched_autogroup_detach(struct task_struct *p) { }
2127static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2128static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2129#endif
2130
d95f4122 2131extern bool yield_to(struct task_struct *p, bool preempt);
36c8b586
IM
2132extern void set_user_nice(struct task_struct *p, long nice);
2133extern int task_prio(const struct task_struct *p);
d0ea0268
DY
2134/**
2135 * task_nice - return the nice value of a given task.
2136 * @p: the task in question.
2137 *
2138 * Return: The nice value [ -20 ... 0 ... 19 ].
2139 */
2140static inline int task_nice(const struct task_struct *p)
2141{
2142 return PRIO_TO_NICE((p)->static_prio);
2143}
36c8b586
IM
2144extern int can_nice(const struct task_struct *p, const int nice);
2145extern int task_curr(const struct task_struct *p);
1da177e4 2146extern int idle_cpu(int cpu);
fe7de49f
KM
2147extern int sched_setscheduler(struct task_struct *, int,
2148 const struct sched_param *);
961ccddd 2149extern int sched_setscheduler_nocheck(struct task_struct *, int,
fe7de49f 2150 const struct sched_param *);
d50dde5a
DF
2151extern int sched_setattr(struct task_struct *,
2152 const struct sched_attr *);
36c8b586 2153extern struct task_struct *idle_task(int cpu);
c4f30608
PM
2154/**
2155 * is_idle_task - is the specified task an idle task?
fa757281 2156 * @p: the task in question.
e69f6186
YB
2157 *
2158 * Return: 1 if @p is an idle task. 0 otherwise.
c4f30608 2159 */
7061ca3b 2160static inline bool is_idle_task(const struct task_struct *p)
c4f30608
PM
2161{
2162 return p->pid == 0;
2163}
36c8b586
IM
2164extern struct task_struct *curr_task(int cpu);
2165extern void set_curr_task(int cpu, struct task_struct *p);
1da177e4
LT
2166
2167void yield(void);
2168
2169/*
2170 * The default (Linux) execution domain.
2171 */
2172extern struct exec_domain default_exec_domain;
2173
2174union thread_union {
2175 struct thread_info thread_info;
2176 unsigned long stack[THREAD_SIZE/sizeof(long)];
2177};
2178
2179#ifndef __HAVE_ARCH_KSTACK_END
2180static inline int kstack_end(void *addr)
2181{
2182 /* Reliable end of stack detection:
2183 * Some APM BIOS versions misalign the stack
2184 */
2185 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2186}
2187#endif
2188
2189extern union thread_union init_thread_union;
2190extern struct task_struct init_task;
2191
2192extern struct mm_struct init_mm;
2193
198fe21b
PE
2194extern struct pid_namespace init_pid_ns;
2195
2196/*
2197 * find a task by one of its numerical ids
2198 *
198fe21b
PE
2199 * find_task_by_pid_ns():
2200 * finds a task by its pid in the specified namespace
228ebcbe
PE
2201 * find_task_by_vpid():
2202 * finds a task by its virtual pid
198fe21b 2203 *
e49859e7 2204 * see also find_vpid() etc in include/linux/pid.h
198fe21b
PE
2205 */
2206
228ebcbe
PE
2207extern struct task_struct *find_task_by_vpid(pid_t nr);
2208extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2209 struct pid_namespace *ns);
198fe21b 2210
1da177e4 2211/* per-UID process charging. */
7b44ab97 2212extern struct user_struct * alloc_uid(kuid_t);
1da177e4
LT
2213static inline struct user_struct *get_uid(struct user_struct *u)
2214{
2215 atomic_inc(&u->__count);
2216 return u;
2217}
2218extern void free_uid(struct user_struct *);
1da177e4
LT
2219
2220#include <asm/current.h>
2221
f0af911a 2222extern void xtime_update(unsigned long ticks);
1da177e4 2223
b3c97528
HH
2224extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2225extern int wake_up_process(struct task_struct *tsk);
3e51e3ed 2226extern void wake_up_new_task(struct task_struct *tsk);
1da177e4
LT
2227#ifdef CONFIG_SMP
2228 extern void kick_process(struct task_struct *tsk);
2229#else
2230 static inline void kick_process(struct task_struct *tsk) { }
2231#endif
aab03e05 2232extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
ad46c2c4 2233extern void sched_dead(struct task_struct *p);
1da177e4 2234
1da177e4
LT
2235extern void proc_caches_init(void);
2236extern void flush_signals(struct task_struct *);
3bcac026 2237extern void __flush_signals(struct task_struct *);
10ab825b 2238extern void ignore_signals(struct task_struct *);
1da177e4
LT
2239extern void flush_signal_handlers(struct task_struct *, int force_default);
2240extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2241
2242static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2243{
2244 unsigned long flags;
2245 int ret;
2246
2247 spin_lock_irqsave(&tsk->sighand->siglock, flags);
2248 ret = dequeue_signal(tsk, mask, info);
2249 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2250
2251 return ret;
53c8f9f1 2252}
1da177e4
LT
2253
2254extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2255 sigset_t *mask);
2256extern void unblock_all_signals(void);
2257extern void release_task(struct task_struct * p);
2258extern int send_sig_info(int, struct siginfo *, struct task_struct *);
1da177e4
LT
2259extern int force_sigsegv(int, struct task_struct *);
2260extern int force_sig_info(int, struct siginfo *, struct task_struct *);
c4b92fc1 2261extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
c4b92fc1 2262extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
d178bc3a
SH
2263extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2264 const struct cred *, u32);
c4b92fc1
EB
2265extern int kill_pgrp(struct pid *pid, int sig, int priv);
2266extern int kill_pid(struct pid *pid, int sig, int priv);
c3de4b38 2267extern int kill_proc_info(int, struct siginfo *, pid_t);
86773473 2268extern __must_check bool do_notify_parent(struct task_struct *, int);
a7f0765e 2269extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
1da177e4 2270extern void force_sig(int, struct task_struct *);
1da177e4 2271extern int send_sig(int, struct task_struct *, int);
09faef11 2272extern int zap_other_threads(struct task_struct *p);
1da177e4
LT
2273extern struct sigqueue *sigqueue_alloc(void);
2274extern void sigqueue_free(struct sigqueue *);
ac5c2153 2275extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
9ac95f2f 2276extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
1da177e4 2277
51a7b448
AV
2278static inline void restore_saved_sigmask(void)
2279{
2280 if (test_and_clear_restore_sigmask())
77097ae5 2281 __set_current_blocked(&current->saved_sigmask);
51a7b448
AV
2282}
2283
b7f9a11a
AV
2284static inline sigset_t *sigmask_to_save(void)
2285{
2286 sigset_t *res = &current->blocked;
2287 if (unlikely(test_restore_sigmask()))
2288 res = &current->saved_sigmask;
2289 return res;
2290}
2291
9ec52099
CLG
2292static inline int kill_cad_pid(int sig, int priv)
2293{
2294 return kill_pid(cad_pid, sig, priv);
2295}
2296
1da177e4
LT
2297/* These can be the second arg to send_sig_info/send_group_sig_info. */
2298#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2299#define SEND_SIG_PRIV ((struct siginfo *) 1)
2300#define SEND_SIG_FORCED ((struct siginfo *) 2)
2301
2a855dd0
SAS
2302/*
2303 * True if we are on the alternate signal stack.
2304 */
1da177e4
LT
2305static inline int on_sig_stack(unsigned long sp)
2306{
2a855dd0
SAS
2307#ifdef CONFIG_STACK_GROWSUP
2308 return sp >= current->sas_ss_sp &&
2309 sp - current->sas_ss_sp < current->sas_ss_size;
2310#else
2311 return sp > current->sas_ss_sp &&
2312 sp - current->sas_ss_sp <= current->sas_ss_size;
2313#endif
1da177e4
LT
2314}
2315
2316static inline int sas_ss_flags(unsigned long sp)
2317{
2318 return (current->sas_ss_size == 0 ? SS_DISABLE
2319 : on_sig_stack(sp) ? SS_ONSTACK : 0);
2320}
2321
5a1b98d3
AV
2322static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2323{
2324 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
2325#ifdef CONFIG_STACK_GROWSUP
2326 return current->sas_ss_sp;
2327#else
2328 return current->sas_ss_sp + current->sas_ss_size;
2329#endif
2330 return sp;
2331}
2332
1da177e4
LT
2333/*
2334 * Routines for handling mm_structs
2335 */
2336extern struct mm_struct * mm_alloc(void);
2337
2338/* mmdrop drops the mm and the page tables */
b3c97528 2339extern void __mmdrop(struct mm_struct *);
1da177e4
LT
2340static inline void mmdrop(struct mm_struct * mm)
2341{
6fb43d7b 2342 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
1da177e4
LT
2343 __mmdrop(mm);
2344}
2345
2346/* mmput gets rid of the mappings and all user-space */
2347extern void mmput(struct mm_struct *);
2348/* Grab a reference to a task's mm, if it is not already going away */
2349extern struct mm_struct *get_task_mm(struct task_struct *task);
8cdb878d
CY
2350/*
2351 * Grab a reference to a task's mm, if it is not already going away
2352 * and ptrace_may_access with the mode parameter passed to it
2353 * succeeds.
2354 */
2355extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
1da177e4
LT
2356/* Remove the current task's stale references to the old mm_struct */
2357extern void mm_release(struct task_struct *, struct mm_struct *);
2358
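/*
 * Illustrative usage sketch (not part of the original header): peek at
 * another task's address space. get_task_mm() returns NULL for kernel
 * threads and for tasks whose mm is already going away; total_vm is a
 * field of struct mm_struct from <linux/mm_types.h>.
 */
static inline unsigned long example_mm_total_vm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long total_vm = 0;

	if (mm) {
		total_vm = mm->total_vm;
		mmput(mm);		/* drop the user reference */
	}
	return total_vm;
}
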
6f2c55b8 2359extern int copy_thread(unsigned long, unsigned long, unsigned long,
afa86fc4 2360 struct task_struct *);
1da177e4
LT
2361extern void flush_thread(void);
2362extern void exit_thread(void);
2363
1da177e4 2364extern void exit_files(struct task_struct *);
a7e5328a 2365extern void __cleanup_sighand(struct sighand_struct *);
cbaffba1 2366
1da177e4 2367extern void exit_itimers(struct signal_struct *);
cbaffba1 2368extern void flush_itimer_signals(void);
1da177e4 2369
9402c95f 2370extern void do_group_exit(int);
1da177e4 2371
1da177e4
LT
2372extern int allow_signal(int);
2373extern int disallow_signal(int);
1da177e4 2374
c4ad8f98 2375extern int do_execve(struct filename *,
d7627467 2376 const char __user * const __user *,
da3d4c5f 2377 const char __user * const __user *);
e80d6661 2378extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
36c8b586 2379struct task_struct *fork_idle(int);
2aa3a7f8 2380extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
1da177e4 2381
23aebe16 2382extern void set_task_comm(struct task_struct *tsk, const char *from);
59714d65 2383extern char *get_task_comm(char *to, struct task_struct *tsk);
1da177e4
LT
2384
2385#ifdef CONFIG_SMP
317f3941 2386void scheduler_ipi(void);
85ba2d86 2387extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1da177e4 2388#else
184748cc 2389static inline void scheduler_ipi(void) { }
85ba2d86
RM
2390static inline unsigned long wait_task_inactive(struct task_struct *p,
2391 long match_state)
2392{
2393 return 1;
2394}
1da177e4
LT
2395#endif
2396
05725f7e
JP
2397#define next_task(p) \
2398 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
1da177e4
LT
2399
2400#define for_each_process(p) \
2401 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2402
5bb459bb 2403extern bool current_is_single_threaded(void);
d84f4f99 2404
1da177e4
LT
2405/*
2406 * Careful: do_each_thread/while_each_thread is a double loop so
2407 * 'break' will not work as expected - use goto instead.
2408 */
2409#define do_each_thread(g, t) \
2410 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2411
2412#define while_each_thread(g, t) \
2413 while ((t = next_thread(t)) != g)
2414
0c740d0a
ON
2415#define __for_each_thread(signal, t) \
2416 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2417
2418#define for_each_thread(p, t) \
2419 __for_each_thread((p)->signal, t)
2420
2421/* Careful: this is a double loop, 'break' won't work as expected. */
2422#define for_each_process_thread(p, t) \
2423 for_each_process(p) for_each_thread(p, t)
2424
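/*
 * Illustrative usage sketch (not part of the original header): count every
 * thread in the system with the double loop above. The caller is assumed
 * to hold rcu_read_lock() or tasklist_lock; note that 'break' would only
 * leave the inner loop.
 */
static inline int example_count_threads(void)
{
	struct task_struct *p, *t;
	int nr = 0;

	for_each_process_thread(p, t)
		nr++;

	return nr;
}
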
7e49827c
ON
2425static inline int get_nr_threads(struct task_struct *tsk)
2426{
b3ac022c 2427 return tsk->signal->nr_threads;
7e49827c
ON
2428}
2429
087806b1
ON
2430static inline bool thread_group_leader(struct task_struct *p)
2431{
2432 return p->exit_signal >= 0;
2433}
1da177e4 2434
0804ef4b
EB
2435/* Due to the insanities of de_thread it is possible for a process
2436 * to have the pid of the thread group leader without actually being
2437 * the thread group leader. For iteration through the pids in proc
2438 * all we care about is that we have a task with the appropriate
2439 * pid, we don't actually care if we have the right task.
2440 */
e1403b8e 2441static inline bool has_group_leader_pid(struct task_struct *p)
0804ef4b 2442{
e1403b8e 2443 return task_pid(p) == p->signal->leader_pid;
0804ef4b
EB
2444}
2445
bac0abd6 2446static inline
e1403b8e 2447bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
bac0abd6 2448{
e1403b8e 2449 return p1->signal == p2->signal;
bac0abd6
PE
2450}
2451
36c8b586 2452static inline struct task_struct *next_thread(const struct task_struct *p)
47e65328 2453{
05725f7e
JP
2454 return list_entry_rcu(p->thread_group.next,
2455 struct task_struct, thread_group);
47e65328
ON
2456}
2457
e868171a 2458static inline int thread_group_empty(struct task_struct *p)
1da177e4 2459{
47e65328 2460 return list_empty(&p->thread_group);
1da177e4
LT
2461}
2462
2463#define delay_group_leader(p) \
2464 (thread_group_leader(p) && !thread_group_empty(p))
2465
1da177e4 2466/*
260ea101 2467 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
22e2c507 2468 * subscriptions and synchronises with wait4(). Also used in procfs. Also
ddbcc7e8 2469 * pins the final release of task.io_context. Also protects ->cpuset and
d68b46fe 2470 * ->cgroup.subsys[]. And ->vfork_done.
1da177e4
LT
2471 *
2472 * Nests both inside and outside of read_lock(&tasklist_lock).
2473 * It must not be nested with write_lock_irq(&tasklist_lock),
2474 * neither inside nor outside.
2475 */
2476static inline void task_lock(struct task_struct *p)
2477{
2478 spin_lock(&p->alloc_lock);
2479}
2480
2481static inline void task_unlock(struct task_struct *p)
2482{
2483 spin_unlock(&p->alloc_lock);
2484}
2485
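/*
 * Illustrative usage sketch (not part of the original header): since
 * task_lock() stabilizes ->comm (among the fields listed above), a copy
 * taken under it cannot tear against a concurrent set_task_comm(). This
 * is essentially what get_task_comm(), declared earlier, does.
 */
static inline void example_copy_comm(struct task_struct *p, char *buf)
{
	task_lock(p);
	strncpy(buf, p->comm, TASK_COMM_LEN);	/* comm is NUL-terminated */
	task_unlock(p);
}
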
b8ed374e 2486extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
f63ee72e
ON
2487 unsigned long *flags);
2488
9388dc30
AV
2489static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2490 unsigned long *flags)
2491{
2492 struct sighand_struct *ret;
2493
2494 ret = __lock_task_sighand(tsk, flags);
2495 (void)__cond_lock(&tsk->sighand->siglock, ret);
2496 return ret;
2497}
b8ed374e 2498
f63ee72e
ON
2499static inline void unlock_task_sighand(struct task_struct *tsk,
2500 unsigned long *flags)
2501{
2502 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2503}
2504
4714d1d3 2505#ifdef CONFIG_CGROUPS
257058ae 2506static inline void threadgroup_change_begin(struct task_struct *tsk)
4714d1d3 2507{
257058ae 2508 down_read(&tsk->signal->group_rwsem);
4714d1d3 2509}
257058ae 2510static inline void threadgroup_change_end(struct task_struct *tsk)
4714d1d3 2511{
257058ae 2512 up_read(&tsk->signal->group_rwsem);
4714d1d3 2513}
77e4ef99
TH
2514
2515/**
2516 * threadgroup_lock - lock threadgroup
2517 * @tsk: member task of the threadgroup to lock
2518 *
2519 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2520 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
e56fb287
ON
2521 * change ->group_leader/pid. This is useful for cases where the threadgroup
2522 * needs to stay stable across blockable operations.
77e4ef99
TH
2523 *
2524 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2525 * synchronization. While held, no new task will be added to threadgroup
2526 * and no existing live task will have its PF_EXITING set.
2527 *
e56fb287
ON
2528 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2529 * sub-thread becomes a new leader.
77e4ef99 2530 */
257058ae 2531static inline void threadgroup_lock(struct task_struct *tsk)
4714d1d3 2532{
257058ae 2533 down_write(&tsk->signal->group_rwsem);
4714d1d3 2534}
77e4ef99
TH
2535
2536/**
2537 * threadgroup_unlock - unlock threadgroup
2538 * @tsk: member task of the threadgroup to unlock
2539 *
2540 * Reverse threadgroup_lock().
2541 */
257058ae 2542static inline void threadgroup_unlock(struct task_struct *tsk)
4714d1d3 2543{
257058ae 2544 up_write(&tsk->signal->group_rwsem);
4714d1d3
BB
2545}
2546#else
257058ae
TH
2547static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2548static inline void threadgroup_change_end(struct task_struct *tsk) {}
2549static inline void threadgroup_lock(struct task_struct *tsk) {}
2550static inline void threadgroup_unlock(struct task_struct *tsk) {}
4714d1d3
BB
2551#endif
2552
f037360f
AV
2553#ifndef __HAVE_THREAD_FUNCTIONS
2554
f7e4217b
RZ
2555#define task_thread_info(task) ((struct thread_info *)(task)->stack)
2556#define task_stack_page(task) ((task)->stack)
a1261f54 2557
10ebffde
AV
2558static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2559{
2560 *task_thread_info(p) = *task_thread_info(org);
2561 task_thread_info(p)->task = p;
2562}
2563
2564static inline unsigned long *end_of_stack(struct task_struct *p)
2565{
f7e4217b 2566 return (unsigned long *)(task_thread_info(p) + 1);
10ebffde
AV
2567}
2568
f037360f
AV
2569#endif
2570
8b05c7e6
FT
2571static inline int object_is_on_stack(void *obj)
2572{
2573 void *stack = task_stack_page(current);
2574
2575 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2576}
2577
8c9843e5
BH
2578extern void thread_info_cache_init(void);
2579
7c9f8861
ES
2580#ifdef CONFIG_DEBUG_STACK_USAGE
2581static inline unsigned long stack_not_used(struct task_struct *p)
2582{
2583 unsigned long *n = end_of_stack(p);
2584
2585 do { /* Skip over canary */
2586 n++;
2587 } while (!*n);
2588
2589 return (unsigned long)n - (unsigned long)end_of_stack(p);
2590}
2591#endif
2592
1da177e4
LT
2593/* set thread flags in other task's structures
2594 * - see asm/thread_info.h for TIF_xxxx flags available
2595 */
2596static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2597{
a1261f54 2598 set_ti_thread_flag(task_thread_info(tsk), flag);
1da177e4
LT
2599}
2600
2601static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2602{
a1261f54 2603 clear_ti_thread_flag(task_thread_info(tsk), flag);
1da177e4
LT
2604}
2605
2606static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2607{
a1261f54 2608 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1da177e4
LT
2609}
2610
2611static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2612{
a1261f54 2613 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1da177e4
LT
2614}
2615
2616static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2617{
a1261f54 2618 return test_ti_thread_flag(task_thread_info(tsk), flag);
1da177e4
LT
2619}
2620
2621static inline void set_tsk_need_resched(struct task_struct *tsk)
2622{
2623 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2624}
2625
2626static inline void clear_tsk_need_resched(struct task_struct *tsk)
2627{
2628 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2629}
2630
8ae121ac
GH
2631static inline int test_tsk_need_resched(struct task_struct *tsk)
2632{
2633 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2634}
2635
690cc3ff
EB
2636static inline int restart_syscall(void)
2637{
2638 set_tsk_thread_flag(current, TIF_SIGPENDING);
2639 return -ERESTARTNOINTR;
2640}
2641
1da177e4
LT
2642static inline int signal_pending(struct task_struct *p)
2643{
2644 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2645}
f776d12d 2646
d9588725
RM
2647static inline int __fatal_signal_pending(struct task_struct *p)
2648{
2649 return unlikely(sigismember(&p->pending.signal, SIGKILL));
2650}
f776d12d
MW
2651
2652static inline int fatal_signal_pending(struct task_struct *p)
2653{
2654 return signal_pending(p) && __fatal_signal_pending(p);
2655}
2656
16882c1e
ON
2657static inline int signal_pending_state(long state, struct task_struct *p)
2658{
2659 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2660 return 0;
2661 if (!signal_pending(p))
2662 return 0;
2663
16882c1e
ON
2664 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2665}
2666
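/*
 * Illustrative usage sketch (not part of the original header): a long
 * kernel loop that bails out when the caller has been fatally signalled,
 * returning -EINTR as such loops conventionally do.
 */
static inline int example_interruptible_loop(int iters, void (*step)(void))
{
	int i;

	for (i = 0; i < iters; i++) {
		if (fatal_signal_pending(current))
			return -EINTR;
		step();
	}
	return 0;
}
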
1da177e4
LT
2667/*
2668 * cond_resched() and cond_resched_lock(): latency reduction via
2669 * explicit rescheduling in places that are safe. The return
2670 * value indicates whether a reschedule was in fact done.
2671 * cond_resched_lock() will drop the spinlock before scheduling,
2672 * cond_resched_softirq() will enable bhs before scheduling.
2673 */
c3921ab7 2674extern int _cond_resched(void);
6f80bd98 2675
613afbf8
FW
2676#define cond_resched() ({ \
2677 __might_sleep(__FILE__, __LINE__, 0); \
2678 _cond_resched(); \
2679})
6f80bd98 2680
613afbf8
FW
2681extern int __cond_resched_lock(spinlock_t *lock);
2682
bdd4e85d 2683#ifdef CONFIG_PREEMPT_COUNT
716a4234 2684#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
02b67cc3 2685#else
716a4234 2686#define PREEMPT_LOCK_OFFSET 0
02b67cc3 2687#endif
716a4234 2688
613afbf8 2689#define cond_resched_lock(lock) ({ \
716a4234 2690 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
613afbf8
FW
2691 __cond_resched_lock(lock); \
2692})
2693
2694extern int __cond_resched_softirq(void);
2695
75e1056f
VP
2696#define cond_resched_softirq() ({ \
2697 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2698 __cond_resched_softirq(); \
613afbf8 2699})
1da177e4 2700
f6f3c437
SH
2701static inline void cond_resched_rcu(void)
2702{
2703#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2704 rcu_read_unlock();
2705 cond_resched();
2706 rcu_read_lock();
2707#endif
2708}
2709
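/*
 * Illustrative usage sketch (not part of the original header): drain a
 * list under a spinlock while offering reschedule points;
 * cond_resched_lock() may drop the lock, schedule, and retake it. List
 * helpers are assumed from <linux/list.h>.
 */
static inline void example_drain(spinlock_t *lock, struct list_head *head)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		list_del_init(head->next);	/* consume one entry */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}
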
1da177e4
LT
2710/*
2711 * Does a critical section need to be broken due to another
95c354fe
NP
2712 * task waiting? (This technically does not depend on CONFIG_PREEMPT,
2713 * but reflects a general need for low latency.)
1da177e4 2714 */
95c354fe 2715static inline int spin_needbreak(spinlock_t *lock)
1da177e4 2716{
95c354fe
NP
2717#ifdef CONFIG_PREEMPT
2718 return spin_is_contended(lock);
2719#else
1da177e4 2720 return 0;
95c354fe 2721#endif
1da177e4
LT
2722}
2723
ee761f62
TG
2724/*
2725 * Idle thread specific functions to determine the need_resched
2726 * polling state. We have two versions, one based on TS_POLLING in
2727 * thread_info.status and one based on TIF_POLLING_NRFLAG in
2728 * thread_info.flags
2729 */
2730#ifdef TS_POLLING
2731static inline int tsk_is_polling(struct task_struct *p)
2732{
2733 return task_thread_info(p)->status & TS_POLLING;
2734}
ea811747 2735static inline void __current_set_polling(void)
3a98f871
TG
2736{
2737 current_thread_info()->status |= TS_POLLING;
2738}
2739
ea811747
PZ
2740static inline bool __must_check current_set_polling_and_test(void)
2741{
2742 __current_set_polling();
2743
2744 /*
2745 * Polling state must be visible before we test NEED_RESCHED,
2746 * paired by resched_task()
2747 */
2748 smp_mb();
2749
2750 return unlikely(tif_need_resched());
2751}
2752
2753static inline void __current_clr_polling(void)
3a98f871
TG
2754{
2755 current_thread_info()->status &= ~TS_POLLING;
ea811747
PZ
2756}
2757
2758static inline bool __must_check current_clr_polling_and_test(void)
2759{
2760 __current_clr_polling();
2761
2762 /*
2763 * Polling state must be visible before we test NEED_RESCHED,
2764 * paired by resched_task()
2765 */
2766 smp_mb();
2767
2768 return unlikely(tif_need_resched());
3a98f871 2769}
ee761f62
TG
2770#elif defined(TIF_POLLING_NRFLAG)
2771static inline int tsk_is_polling(struct task_struct *p)
2772{
2773 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2774}
ea811747
PZ
2775
2776static inline void __current_set_polling(void)
3a98f871
TG
2777{
2778 set_thread_flag(TIF_POLLING_NRFLAG);
2779}
2780
ea811747
PZ
2781static inline bool __must_check current_set_polling_and_test(void)
2782{
2783 __current_set_polling();
2784
2785 /*
2786 * Polling state must be visible before we test NEED_RESCHED,
2787 * paired by resched_task()
2788 *
2789 * XXX: assumes set/clear bit are identical barrier wise.
2790 */
2791 smp_mb__after_clear_bit();
2792
2793 return unlikely(tif_need_resched());
2794}
2795
2796static inline void __current_clr_polling(void)
3a98f871
TG
2797{
2798 clear_thread_flag(TIF_POLLING_NRFLAG);
2799}
ea811747
PZ
2800
2801static inline bool __must_check current_clr_polling_and_test(void)
2802{
2803 __current_clr_polling();
2804
2805 /*
2806 * Polling state must be visible before we test NEED_RESCHED,
2807 * paired by resched_task()
2808 */
2809 smp_mb__after_clear_bit();
2810
2811 return unlikely(tif_need_resched());
2812}
2813
ee761f62
TG
2814#else
2815static inline int tsk_is_polling(struct task_struct *p) { return 0; }
ea811747
PZ
2816static inline void __current_set_polling(void) { }
2817static inline void __current_clr_polling(void) { }
2818
2819static inline bool __must_check current_set_polling_and_test(void)
2820{
2821 return unlikely(tif_need_resched());
2822}
2823static inline bool __must_check current_clr_polling_and_test(void)
2824{
2825 return unlikely(tif_need_resched());
2826}
ee761f62
TG
2827#endif
2828
8cb75e0c
PZ
2829static inline void current_clr_polling(void)
2830{
2831 __current_clr_polling();
2832
2833 /*
2834 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
2835 * Once the bit is cleared, we'll get IPIs with every new
2836 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2837 * fold.
2838 */
2839 smp_mb(); /* paired with resched_task() */
2840
2841 preempt_fold_need_resched();
2842}
2843
75f93fed
PZ
2844static __always_inline bool need_resched(void)
2845{
2846 return unlikely(tif_need_resched());
2847}
2848
f06febc9
FM
2849/*
2850 * Thread group CPU time accounting.
2851 */
4cd4c1b4 2852void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
4da94d49 2853void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
f06febc9 2854
490dea45 2855static inline void thread_group_cputime_init(struct signal_struct *sig)
f06febc9 2856{
ee30a7b2 2857 raw_spin_lock_init(&sig->cputimer.lock);
f06febc9
FM
2858}
2859
7bb44ade
RM
2860/*
2861 * Reevaluate whether the task has signals pending delivery.
2862 * Wake the task if so.
2863 * This is required every time the blocked sigset_t changes.
2864 * callers must hold sighand->siglock.
2865 */
2866extern void recalc_sigpending_and_wake(struct task_struct *t);
1da177e4
LT
2867extern void recalc_sigpending(void);
2868
910ffdb1
ON
2869extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2870
2871static inline void signal_wake_up(struct task_struct *t, bool resume)
2872{
2873 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2874}
2875static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2876{
2877 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2878}
1da177e4
LT
2879
2880/*
2881 * Wrappers for p->thread_info->cpu access. No-op on UP.
2882 */
2883#ifdef CONFIG_SMP
2884
2885static inline unsigned int task_cpu(const struct task_struct *p)
2886{
a1261f54 2887 return task_thread_info(p)->cpu;
1da177e4
LT
2888}
2889
b32e86b4
IM
2890static inline int task_node(const struct task_struct *p)
2891{
2892 return cpu_to_node(task_cpu(p));
2893}
2894
c65cc870 2895extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1da177e4
LT
2896
2897#else
2898
2899static inline unsigned int task_cpu(const struct task_struct *p)
2900{
2901 return 0;
2902}
2903
2904static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2905{
2906}
2907
2908#endif /* CONFIG_SMP */
2909
96f874e2
RR
2910extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2911extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
5c45bf27 2912
7c941438 2913#ifdef CONFIG_CGROUP_SCHED
07e06b01 2914extern struct task_group root_task_group;
8323f26c 2915#endif /* CONFIG_CGROUP_SCHED */
9b5b7751 2916
54e99124
DG
2917extern int task_can_switch_user(struct user_struct *up,
2918 struct task_struct *tsk);
2919
4b98d11b
AD
2920#ifdef CONFIG_TASK_XACCT
2921static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2922{
940389b8 2923 tsk->ioac.rchar += amt;
4b98d11b
AD
2924}
2925
2926static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2927{
940389b8 2928 tsk->ioac.wchar += amt;
4b98d11b
AD
2929}
2930
2931static inline void inc_syscr(struct task_struct *tsk)
2932{
940389b8 2933 tsk->ioac.syscr++;
4b98d11b
AD
2934}
2935
2936static inline void inc_syscw(struct task_struct *tsk)
2937{
940389b8 2938 tsk->ioac.syscw++;
4b98d11b
AD
2939}
2940#else
2941static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2942{
2943}
2944
2945static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2946{
2947}
2948
2949static inline void inc_syscr(struct task_struct *tsk)
2950{
2951}
2952
2953static inline void inc_syscw(struct task_struct *tsk)
2954{
2955}
2956#endif
2957
82455257
DH
2958#ifndef TASK_SIZE_OF
2959#define TASK_SIZE_OF(tsk) TASK_SIZE
2960#endif
2961
cf475ad2
BS
2962#ifdef CONFIG_MM_OWNER
2963extern void mm_update_next_owner(struct mm_struct *mm);
2964extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2965#else
2966static inline void mm_update_next_owner(struct mm_struct *mm)
2967{
2968}
2969
2970static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2971{
2972}
2973#endif /* CONFIG_MM_OWNER */
2974
3e10e716
JS
2975static inline unsigned long task_rlimit(const struct task_struct *tsk,
2976 unsigned int limit)
2977{
2978 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2979}
2980
2981static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2982 unsigned int limit)
2983{
2984 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2985}
2986
2987static inline unsigned long rlimit(unsigned int limit)
2988{
2989 return task_rlimit(current, limit);
2990}
2991
2992static inline unsigned long rlimit_max(unsigned int limit)
2993{
2994 return task_rlimit_max(current, limit);
2995}
2996
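/*
 * Illustrative usage sketch (not part of the original header): validate a
 * request against the caller's soft limit. RLIMIT_STACK comes from
 * <linux/resource.h>, which this header already includes.
 */
static inline int example_check_stack_request(unsigned long bytes)
{
	if (bytes > rlimit(RLIMIT_STACK))
		return -ENOMEM;
	return 0;
}
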
1da177e4 2997#endif