// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>
#include <linux/task_work.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
	posix_cputimers_init(pct);
	if (cpu_limit != RLIM_INFINITY) {
		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
		pct->timers_active = true;
	}
}

/*
 * Called after updating RLIMIT_CPU to run the cpu timer and update the
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 *
 * Returns 0 on success, -ESRCH on failure. Can fail if the task is exiting and
 * we cannot lock_task_sighand. Cannot fail if task is current.
 */
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;
	unsigned long irq_fl;

	if (!lock_task_sighand(task, &irq_fl))
		return -ESRCH;
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	unlock_task_sighand(task, &irq_fl);
	return 0;
}

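/*
 * Note: the typical caller is the setrlimit()/do_prlimit() path when
 * RLIMIT_CPU is changed. The new limit in seconds is converted to
 * nanoseconds here and fed into the CPUCLOCK_PROF expiry cache via
 * set_process_cpu_timer().
 */
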
/*
 * Functions for validating access to tasks.
 */
static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t upid = CPUCLOCK_PID(clock);
	struct pid *pid;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	/*
	 * If the encoded PID is 0, then the timer is targeted at current
	 * or the process to which current belongs.
	 */
	if (upid == 0)
		return thread ? task_pid(current) : task_tgid(current);

	pid = find_vpid(upid);
	if (!pid)
		return NULL;

	if (thread) {
		struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
		return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
	}

	/*
	 * For clock_gettime(PROCESS) allow finding the process with the
	 * pid of the current task. The code needs the tgid of the
	 * process so that pid_task(pid, PIDTYPE_TGID) can be used to
	 * find the process.
	 */
	if (gettime && (pid == task_pid(current)))
		return task_tgid(current);

	/*
	 * For process clocks, require that pid identifies a process.
	 */
	return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	int ret;

	rcu_read_lock();
	ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
	rcu_read_unlock();

	return ret;
}

static inline enum pid_type clock_pid_type(const clockid_t clock)
{
	return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
}

static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{
	return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
}

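/*
 * Note: pid_for_clock() and cpu_timer_task_rcu() perform RCU based
 * lookups (find_vpid()/pid_task()), so callers must hold
 * rcu_read_lock(), as validate_clock_permissions() above does.
 */
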
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	u64 delta, incr, expires = timer->it.cpu.node.expires;
	int i;

	if (!timer->it_interval)
		return expires;

	if (now < expires)
		return expires;

	incr = timer->it_interval;
	delta = now + incr - expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.node.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
	return timer->it.cpu.node.expires;
}

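/*
 * Illustration of the scaling above (assumed example values): with
 * it_interval = 10ns, expires = 100ns and now = 175ns, delta is 85ns.
 * incr doubles 10 -> 20 -> 40 -> 80 (i == 3); walking back down, only
 * the 80ns step fits, so expires becomes 180ns and it_overrun grows by
 * 1LL << 3 == 8, exactly the eight periods that elapsed between 100ns
 * and 175ns.
 */
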
/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}

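/*
 * Note on the bit trick above: ~U64_MAX == 0, so the OR of the three
 * complements is zero exactly when every nextevt holds U64_MAX, i.e.
 * when no expiry is cached for any clock.
 */
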
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

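/*
 * Example of the resolution reported above: with HZ == 1000 the PROF
 * and VIRT clocks advertise ceil(NSEC_PER_SEC / HZ) == 1000000ns (one
 * tick), while CPUCLOCK_SCHED always advertises 1ns.
 */
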
static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
	samples[CPUCLOCK_PROF] = stime + utime;
	samples[CPUCLOCK_VIRT] = utime;
	samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
	u64 stime, utime;

	task_cputime(p, &utime, &stime);
	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{
	u64 stime, utime, rtime;

	utime = atomic64_read(&at->utime);
	stime = atomic64_read(&at->stime);
	rtime = atomic64_read(&at->sum_exec_runtime);
	store_samples(samples, stime, utime, rtime);
}

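/*
 * Note: the samples[] arrays used throughout this file are indexed by
 * the CPUCLOCK_PROF/CPUCLOCK_VIRT/CPUCLOCK_SCHED constants, so a
 * validated clkid can be used directly as an index, as
 * cpu_clock_sample_group() below does with samples[clkid].
 */
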
/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

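/*
 * The cmpxchg loop above makes each atomic field a monotonic ratchet:
 * a value is only ever replaced by a larger one, and a lost race just
 * re-reads and retries while the sum is still greater, so the sampled
 * cputime can never appear to go backwards.
 */
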
/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	WARN_ON_ONCE(!pct->timers_active);

	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime accounting and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	lockdep_assert_task_sighand_held(tsk);

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(pct->timers_active)) {
		struct task_cputime sum;

		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting timers_active without a lock. Ensure this
		 * only gets written to in one operation. We set it after
		 * update_gt_cputime() as a small optimization, but
		 * barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(pct->timers_active, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
	struct task_cputime ct;

	thread_group_cputime(tsk, &ct);
	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. clkid is already validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	struct posix_cputimers *pct = &p->signal->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];

	if (!READ_ONCE(pct->timers_active)) {
		if (start)
			thread_group_start_cputime(p, samples);
		else
			__thread_group_cputime(p, samples);
	} else {
		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
	}

	return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	rcu_read_lock();
	tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
	if (!tsk) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	rcu_read_unlock();

	*tp = ns_to_timespec64(t);
	return 0;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	static struct lock_class_key posix_cpu_timers_key;
	struct pid *pid;

	rcu_read_lock();
	pid = pid_for_clock(new_timer->it_clock, false);
	if (!pid) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/*
	 * If posix timer expiry is handled in task work context then
	 * timer::it_lock can be taken without disabling interrupts as all
	 * other locking happens in task context. This requires a separate
	 * lock class key otherwise regular posix timer expiry would record
	 * the lock class being taken in interrupt context and generate a
	 * false positive warning.
	 */
	if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
		lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);

	new_timer->kclock = &clock_posix_cpu;
	timerqueue_init(&new_timer->it.cpu.node);
	new_timer->it.cpu.pid = get_pid(pid);
	rcu_read_unlock();
	return 0;
}

static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
					      struct task_struct *tsk)
{
	int clkidx = CPUCLOCK_WHICH(timer->it_clock);

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		return tsk->posix_cputimers.bases + clkidx;
	else
		return tsk->signal->posix_cputimers.bases + clkidx;
}

/*
 * Force recalculating the base earliest expiration on the next tick.
 * This will also re-evaluate the need to keep around the process wide
 * cputime counter and tick dependency and eventually shut these down
 * if necessary.
 */
static void trigger_base_recalc_expires(struct k_itimer *timer,
					struct task_struct *tsk)
{
	struct posix_cputimer_base *base = timer_base(timer, tsk);

	base->nextevt = 0;
}

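/*
 * Setting nextevt to 0 above guarantees that the next comparison in
 * task_cputimers_expired() sees this base as "expired", which forces
 * the slow path to run and recompute nextevt from the timers that are
 * still queued.
 */
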
/*
 * Dequeue the timer and reset the base if it was its earliest expiration.
 * It makes sure the next tick recalculates the base next expiration so we
 * don't keep the costly process wide cputime counter around for a random
 * amount of time, along with the tick dependency.
 *
 * If another timer gets queued between this and the next tick, its
 * expiration will update the base next event if necessary on the next
 * tick.
 */
static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct posix_cputimer_base *base;

	if (!cpu_timer_dequeue(ctmr))
		return;

	base = timer_base(timer, p);
	if (cpu_timer_getexpires(ctmr) == base->nextevt)
		trigger_base_recalc_expires(timer, p);
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct sighand_struct *sighand;
	struct task_struct *p;
	unsigned long flags;
	int ret = 0;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * This raced with the reaping of the task. The exit cleanup
		 * should have removed this timer from the timer queue.
		 */
		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			disarm_timer(timer, p);

		unlock_task_sighand(p, &flags);
	}

out:
	rcu_read_unlock();
	if (!ret)
		put_pid(ctmr->pid);

	return ret;
}

static void cleanup_timerqueue(struct timerqueue_head *head)
{
	struct timerqueue_node *node;
	struct cpu_timer *ctmr;

	while ((node = timerqueue_getnext(head))) {
		timerqueue_del(head, node);
		ctmr = container_of(node, struct cpu_timer, node);
		ctmr->head = NULL;
	}
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{
	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(&tsk->posix_cputimers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(&tsk->signal->posix_cputimers);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{
	struct posix_cputimer_base *base = timer_base(timer, p);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 newexp = cpu_timer_getexpires(ctmr);

	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
		return;

	/*
	 * We are the new earliest-expiring POSIX 1.b timer, hence
	 * need to update expiration cache. Take into account that
	 * for process timers we share expiration cache with itimers
	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
	 */
	if (newexp < base->nextevt)
		base->nextevt = newexp;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
	else
		tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		cpu_timer_setexpires(ctmr, 0);
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		cpu_timer_setexpires(ctmr, 0);
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer. Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		cpu_timer_setexpires(ctmr, 0);
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer. But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	u64 old_expires, new_expires, old_incr, val;
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct sighand_struct *sighand;
	struct task_struct *p;
	unsigned long flags;
	int ret = 0;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p) {
		/*
		 * If p has just been reaped, we can no
		 * longer get any information about it at all.
		 */
		rcu_read_unlock();
		return -ESRCH;
	}

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	old_incr = timer->it_interval;
	old_expires = cpu_timer_getexpires(ctmr);

	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else {
		cpu_timer_dequeue(ctmr);
	}

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative. To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer). With an absolute time, we must
	 * check if it's already passed. In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		val = cpu_clock_sample(clkid, p);
	else
		val = cpu_clock_sample_group(clkid, p, true);

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has overrun already.
			 * If it has, we'll report it as having overrun and
			 * with the next reloaded timer already ticking,
			 * though we are swallowing that pending
			 * notification here to install the new setting.
			 */
			u64 exp = bump_cpu_timer(timer, val);

			if (val < exp) {
				old_expires = exp - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	cpu_timer_setexpires(ctmr, new_expires);
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer, p);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (val >= new_expires) {
		if (new_expires != 0) {
			/*
			 * The designated time already passed, so we notify
			 * immediately, even if the thread never runs to
			 * accumulate more time on this clock.
			 */
			cpu_timer_fire(timer);
		}

		/*
		 * Make sure we don't keep around the process wide cputime
		 * counter or the tick dependency if they are not necessary.
		 */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			goto out;

		if (!cpu_timer_queued(ctmr))
			trigger_base_recalc_expires(timer, p);

		unlock_task_sighand(p, &flags);
	}
out:
	rcu_read_unlock();
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}

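/*
 * Illustration (assumed values): timer_settime() without TIMER_ABSTIME
 * and it_value = 1ms on a thread CPU clock samples val, the CPU time
 * the thread has consumed so far, and arms the timer at val + 1ms of
 * consumed CPU time. With TIMER_ABSTIME the requested value is used
 * as-is and, if it already lies in the past, cpu_timer_fire() delivers
 * the expiry immediately.
 */
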
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 now, expires = cpu_timer_getexpires(ctmr);
	struct task_struct *p;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!expires)
		goto out;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		now = cpu_clock_sample(clkid, p);
	else
		now = cpu_clock_sample_group(clkid, p, false);

	if (now < expires) {
		itp->it_value = ns_to_timespec64(expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet. Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
out:
	rcu_read_unlock();
}

#define MAX_COLLECTED	20

static u64 collect_timerqueue(struct timerqueue_head *head,
			      struct list_head *firing, u64 now)
{
	struct timerqueue_node *next;
	int i = 0;

	while ((next = timerqueue_getnext(head))) {
		struct cpu_timer *ctmr;
		u64 expires;

		ctmr = container_of(next, struct cpu_timer, node);
		expires = cpu_timer_getexpires(ctmr);
		/* Limit the number of timers to expire at once */
		if (++i == MAX_COLLECTED || now < expires)
			return expires;

		ctmr->firing = 1;
		cpu_timer_dequeue(ctmr);
		list_add_tail(&ctmr->elist, firing);
	}

	return U64_MAX;
}

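/*
 * Note: the value returned above is the earliest expiry time that is
 * still pending, or U64_MAX if the queue was drained; it becomes the
 * new nextevt cache value. The MAX_COLLECTED cut-off merely bounds the
 * batch: the 20th expired timer stays queued and its already past
 * expiry time makes the very next tick take the slow path again.
 */
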
static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
				    struct list_head *firing)
{
	struct posix_cputimer_base *base = pct->bases;
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
		base->nextevt = collect_timerqueue(&base->tqhead, firing,
						   samples[i]);
	}
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
	if (time < limit)
		return false;

	if (print_fatal_signals) {
		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
			rt ? "RT" : "CPU", hard ? "hard" : "soft",
			current->comm, task_pid_nr(current));
	}
	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
	return true;
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	if (expiry_cache_is_inactive(pct))
		return;

	task_sample_cputime(tsk, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(rttime, hard, SIGKILL, true, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
			soft += USEC_PER_SEC;
			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
		}
	}

	if (expiry_cache_is_inactive(pct))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;

	/* Turn off the active flag. This is done without locking. */
	WRITE_ONCE(pct->timers_active, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && it->expires < *expires)
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->*_timers list onto the firing list. Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	struct posix_cputimers *pct = &sig->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	/*
	 * If there are no active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
	 * processing when there is already another task handling them.
	 */
	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	pct->expiry_active = true;

	/*
	 * Collect the current process totals. Group accounting is active
	 * so the sample can be taken directly.
	 */
	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
			 &pct->bases[CPUCLOCK_PROF].nextevt,
			 samples[CPUCLOCK_PROF], SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
			 &pct->bases[CPUCLOCK_VIRT].nextevt,
			 samples[CPUCLOCK_VIRT], SIGVTALRM);

	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 ptime = samples[CPUCLOCK_PROF];
		u64 softns = (u64)soft * NSEC_PER_SEC;
		u64 hardns = (u64)hard * NSEC_PER_SEC;

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(ptime, hardns, SIGKILL, false, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
			softns += NSEC_PER_SEC;
		}

		/* Update the expiry cache */
		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
			pct->bases[CPUCLOCK_PROF].nextevt = softns;
	}

	if (expiry_cache_is_inactive(pct))
		stop_process_timers(sig);

	pct->expiry_active = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct task_struct *p;
	struct sighand_struct *sighand;
	unsigned long flags;
	u64 now;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/* Protect timer list r/w in arm_timer() */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL))
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		now = cpu_clock_sample(clkid, p);
	else
		now = cpu_clock_sample_group(clkid, p, true);

	bump_cpu_timer(timer, now);

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, p);
	unlock_task_sighand(p, &flags);
out:
	rcu_read_unlock();
}

/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than or equal to the
 * corresponding member of @pct->bases[CLK].nextevt. False otherwise
 */
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++) {
		if (samples[i] >= pct->bases[i].nextevt)
			return true;
	}
	return false;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers. If both are zero (there are no
 * timers set) return false. Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times. Return
 * true if a timer has expired, else return false.
 */
static inline bool fastpath_timer_check(struct task_struct *tsk)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	struct signal_struct *sig;

	if (!expiry_cache_is_inactive(pct)) {
		u64 samples[CPUCLOCK_MAX];

		task_sample_cputime(tsk, samples);
		if (task_cputimers_expired(samples, pct))
			return true;
	}

	sig = tsk->signal;
	pct = &sig->posix_cputimers;
	/*
	 * Check if thread group timers expired when timers are active and
	 * no other thread in the group is already handling expiry for
	 * thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to be
	 * a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to handle timer expiry.
	 *
	 * In the worst case scenario, if concurrently timers_active is set
	 * or expiry_active is cleared, but the current thread doesn't see
	 * the change yet, the timer checks are delayed until the next
	 * thread in the group gets a scheduler interrupt to handle the
	 * timer. This isn't an issue in practice because these types of
	 * delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
		u64 samples[CPUCLOCK_MAX];

		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
					   samples);

		if (task_cputimers_expired(samples, pct))
			return true;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return true;

	return false;
}

static void handle_posix_cpu_timers(struct task_struct *tsk);

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
static void posix_cpu_timers_work(struct callback_head *work)
{
	handle_posix_cpu_timers(current);
}

/*
 * Clear existing posix CPU timers task work.
 */
void clear_posix_cputimers_work(struct task_struct *p)
{
	/*
	 * A copied work entry from the old task is not meaningful, clear it.
	 * N.B. init_task_work will not do this.
	 */
	memset(&p->posix_cputimers_work.work, 0,
	       sizeof(p->posix_cputimers_work.work));
	init_task_work(&p->posix_cputimers_work.work,
		       posix_cpu_timers_work);
	p->posix_cputimers_work.scheduled = false;
}

/*
 * Initialize posix CPU timers task work in init task. Out of line to
 * keep the callback static and to avoid header recursion hell.
 */
void __init posix_cputimers_init_work(void)
{
	clear_posix_cputimers_work(current);
}

/*
 * Note: All operations on tsk->posix_cputimer_work.scheduled happen either
 * in hard interrupt context or in task context with interrupts
 * disabled. Aside of that the writer/reader interaction is always in the
 * context of the current task, which means they are strict per CPU.
 */
static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
	return tsk->posix_cputimers_work.scheduled;
}

static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
		return;

	/* Schedule task work to actually expire the timers */
	tsk->posix_cputimers_work.scheduled = true;
	task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
}

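/*
 * Note: TWA_RESUME queues the work to run when the task returns to
 * user space, so with CONFIG_POSIX_CPU_TIMERS_TASK_WORK the actual
 * expiry processing happens in task context shortly after the tick,
 * not in the interrupt itself.
 */
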
static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
						unsigned long start)
{
	bool ret = true;

	/*
	 * On !RT kernels interrupts are disabled while collecting expired
	 * timers, so no tick can happen and the fast path check can be
	 * reenabled without further checks.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
		tsk->posix_cputimers_work.scheduled = false;
		return true;
	}

	/*
	 * On RT enabled kernels ticks can happen while the expired timers
	 * are collected under sighand lock. But any tick which observes
	 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
	 * checks. So reenabling the tick work has to be done carefully:
	 *
	 * Disable interrupts and run the fast path check if jiffies have
	 * advanced since the collecting of expired timers started. If
	 * jiffies have not advanced or the fast path check did not find
	 * newly expired timers, reenable the fast path check in the timer
	 * interrupt. If there are newly expired timers, return false and
	 * let the collection loop repeat.
	 */
	local_irq_disable();
	if (start != jiffies && fastpath_timer_check(tsk))
		ret = false;
	else
		tsk->posix_cputimers_work.scheduled = false;
	local_irq_enable();

	return ret;
}
#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{
	lockdep_posixtimer_enter();
	handle_posix_cpu_timers(tsk);
	lockdep_posixtimer_exit();
}

static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
	return false;
}

static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
						unsigned long start)
{
	return true;
}
#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */

static void handle_posix_cpu_timers(struct task_struct *tsk)
{
	struct k_itimer *timer, *next;
	unsigned long flags, start;
	LIST_HEAD(firing);

	if (!lock_task_sighand(tsk, &flags))
		return;

	do {
		/*
		 * On RT locking sighand lock does not disable interrupts,
		 * so this needs to be careful vs. ticks. Store the current
		 * jiffies value.
		 */
		start = READ_ONCE(jiffies);
		barrier();

		/*
		 * Here we take all the firing timers off the
		 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists
		 * and put them on the firing list.
		 */
		check_thread_timers(tsk, &firing);

		check_process_timers(tsk, &firing);

		/*
		 * The above timer checks have updated the expiry cache and
		 * because nothing can have queued or modified timers after
		 * sighand lock was taken above it is guaranteed to be
		 * consistent. So the next timer interrupt fastpath check
		 * will find valid data.
		 *
		 * If timer expiry runs in the timer interrupt context then
		 * the loop is not relevant as timers will be directly
		 * expired in interrupt context. The stub function below
		 * always returns true which allows the compiler to
		 * optimize the loop out.
		 *
		 * If timer expiry is deferred to task work context then
		 * the following rules apply:
		 *
		 * - On !RT kernels no tick can have happened on this CPU
		 *   after sighand lock was acquired because interrupts are
		 *   disabled. So reenabling task work before dropping
		 *   sighand lock and reenabling interrupts is race free.
		 *
		 * - On RT kernels ticks might have happened but the tick
		 *   work ignored posix CPU timer handling because the
		 *   CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
		 *   must be done very carefully including a check whether
		 *   ticks have happened since the start of the timer
		 *   expiry checks. posix_cpu_timers_enable_work() takes
		 *   care of that and eventually lets the expiry checks
		 *   run again.
		 */
	} while (!posix_cpu_timers_enable_work(tsk, start));

	/*
	 * We must release the sighand lock before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list. We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us. We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
		int cpu_firing;

		/*
		 * spin_lock() is sufficient here even independent of the
		 * expiry context. If expiry happens in hard interrupt
		 * context it's obvious. For task work context it's safe
		 * because all other operations on timer::it_lock happen in
		 * task context (syscall or exit).
		 */
		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.elist);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

1371 | /* | |
1372 | * This is called from the timer interrupt handler. The irq handler has | |
1373 | * already updated our counts. We need to check if any timers fire now. | |
1374 | * Interrupts are disabled. | |
1375 | */ | |
1376 | void run_posix_cpu_timers(void) | |
1377 | { | |
1378 | struct task_struct *tsk = current; | |
1379 | ||
1380 | lockdep_assert_irqs_disabled(); | |
1381 | ||
1fb497dd TG |
1382 | /* |
1383 | * If the actual expiry is deferred to task work context and the | |
1384 | * work is already scheduled there is no point to do anything here. | |
1385 | */ | |
1386 | if (posix_cpu_timers_work_scheduled(tsk)) | |
1387 | return; | |
1388 | ||
820903c7 TG |
1389 | /* |
1390 | * The fast path checks that there are no expired thread or thread | |
1391 | * group timers. If that's so, just return. | |
1392 | */ | |
1393 | if (!fastpath_timer_check(tsk)) | |
1394 | return; | |
1395 | ||
820903c7 | 1396 | __run_posix_cpu_timers(tsk); |
1da177e4 LT |
1397 | } |
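
For orientation, run_posix_cpu_timers() is reached from the per-tick bookkeeping in kernel/time/timer.c. A heavily abridged sketch of that caller (names are from mainline, the body is trimmed, and exact contents vary by kernel version):

void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	account_process_tick(p, user_tick);	/* fold the tick into cputime */
	run_local_timers();			/* timer-wheel softirq check */
	scheduler_tick();
	run_posix_cpu_timers();			/* the fastpath check above */
}

This tick-interrupt context is why the function can assert that interrupts are disabled.
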
1398 | ||
1399 | /* | |
f55db609 | 1400 | * Set one of the process-wide special case CPU timers or RLIMIT_CPU. |
f06febc9 | 1401 | * The tsk->sighand->siglock must be held by the caller. |
1da177e4 | 1402 | */ |
1b0dd96d | 1403 | void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid, |
858cf3a8 | 1404 | u64 *newval, u64 *oldval) |
1da177e4 | 1405 | { |
87dc6448 | 1406 | u64 now, *nextevt; |
1da177e4 | 1407 | |
1b0dd96d | 1408 | if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED)) |
692117c1 TG |
1409 | return; |
1410 | ||
87dc6448 | 1411 | nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt; |
1b0dd96d | 1412 | now = cpu_clock_sample_group(clkid, tsk, true); |
1da177e4 | 1413 | |
5405d005 | 1414 | if (oldval) { |
f55db609 SG |
1415 | /* |
1416 | * We are setting an itimer. The *oldval is absolute and we update |
1417 | * it to be relative; the *newval argument is relative and we |
1418 | * update it to be absolute. |
1419 | */ | |
64861634 | 1420 | if (*oldval) { |
858cf3a8 | 1421 | if (*oldval <= now) { |
1da177e4 | 1422 | /* Just about to fire. */ |
858cf3a8 | 1423 | *oldval = TICK_NSEC; |
1da177e4 | 1424 | } else { |
858cf3a8 | 1425 | *oldval -= now; |
1da177e4 LT |
1426 | } |
1427 | } | |
1428 | ||
8cd9da85 FW |
1429 | if (*newval) |
1430 | *newval += now; | |
1da177e4 LT |
1431 | } |
1432 | ||
1433 | /* | |
1b0dd96d TG |
1434 | * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF |
1435 | * expiry cache is also used by RLIMIT_CPU! |
1da177e4 | 1436 | */ |
2bbdbdae | 1437 | if (*newval < *nextevt) |
87dc6448 | 1438 | *nextevt = *newval; |
b7878300 | 1439 | |
1e4ca26d | 1440 | tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER); |
1da177e4 LT |
1441 | } |
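
The ITIMER_PROF/ITIMER_VIRTUAL paths in kernel/time/itimer.c funnel into set_process_cpu_timer() above. A small userspace illustration (not kernel code; the file and program names are arbitrary, build with `cc -o prof prof.c`): arm a 100ms profiling itimer and spin until SIGPROF arrives, since the signal is only delivered once the process has actually consumed that much CPU time:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

static volatile sig_atomic_t fired;

static void on_sigprof(int sig)
{
	fired = 1;
}

int main(void)
{
	struct itimerval v = { .it_value = { .tv_usec = 100000 } };

	signal(SIGPROF, on_sigprof);
	if (setitimer(ITIMER_PROF, &v, NULL)) {
		perror("setitimer");
		return EXIT_FAILURE;
	}
	while (!fired)
		;	/* burn CPU so the CPUCLOCK_PROF clock advances */
	puts("SIGPROF after ~100ms of user+system CPU time");
	return 0;
}
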
1442 | ||
e4b76555 | 1443 | static int do_cpu_nanosleep(const clockid_t which_clock, int flags, |
343d8fc2 | 1444 | const struct timespec64 *rqtp) |
1da177e4 | 1445 | { |
86a9c446 | 1446 | struct itimerspec64 it; |
343d8fc2 TG |
1447 | struct k_itimer timer; |
1448 | u64 expires; | |
1da177e4 LT |
1449 | int error; |
1450 | ||
1da177e4 LT |
1451 | /* |
1452 | * Set up a temporary timer and then wait for it to go off. | |
1453 | */ | |
1454 | memset(&timer, 0, sizeof(timer)); |
1455 | spin_lock_init(&timer.it_lock); | |
1456 | timer.it_clock = which_clock; | |
1457 | timer.it_overrun = -1; | |
1458 | error = posix_cpu_timer_create(&timer); | |
1459 | timer.it_process = current; | |
60bda037 | 1460 | |
1da177e4 | 1461 | if (!error) { |
5f252b32 | 1462 | static struct itimerspec64 zero_it; |
edbeda46 | 1463 | struct restart_block *restart; |
e4b76555 | 1464 | |
edbeda46 | 1465 | memset(&it, 0, sizeof(it)); |
86a9c446 | 1466 | it.it_value = *rqtp; |
1da177e4 LT |
1467 | |
1468 | spin_lock_irq(&timer.it_lock); | |
86a9c446 | 1469 | error = posix_cpu_timer_set(&timer, flags, &it, NULL); |
1da177e4 LT |
1470 | if (error) { |
1471 | spin_unlock_irq(&timer.it_lock); | |
1472 | return error; | |
1473 | } | |
1474 | ||
1475 | while (!signal_pending(current)) { | |
60bda037 | 1476 | if (!cpu_timer_getexpires(&timer.it.cpu)) { |
1da177e4 | 1477 | /* |
e6c42c29 SG |
1478 | * Our timer fired and was reset; the |
1479 | * deletion below cannot fail. |
1da177e4 | 1480 | */ |
e6c42c29 | 1481 | posix_cpu_timer_del(&timer); |
1da177e4 LT |
1482 | spin_unlock_irq(&timer.it_lock); |
1483 | return 0; | |
1484 | } | |
1485 | ||
1486 | /* | |
1487 | * Block until cpu_timer_fire (or a signal) wakes us. | |
1488 | */ | |
1489 | __set_current_state(TASK_INTERRUPTIBLE); | |
1490 | spin_unlock_irq(&timer.it_lock); | |
1491 | schedule(); | |
1492 | spin_lock_irq(&timer.it_lock); | |
1493 | } | |
1494 | ||
1495 | /* | |
1496 | * We were interrupted by a signal. | |
1497 | */ | |
60bda037 | 1498 | expires = cpu_timer_getexpires(&timer.it.cpu); |
86a9c446 | 1499 | error = posix_cpu_timer_set(&timer, 0, &zero_it, &it); |
e6c42c29 SG |
1500 | if (!error) { |
1501 | /* | |
1502 | * The timer is now unarmed; deletion cannot fail. |
1503 | */ | |
1504 | posix_cpu_timer_del(&timer); | |
1505 | } | |
1da177e4 LT |
1506 | spin_unlock_irq(&timer.it_lock); |
1507 | ||
e6c42c29 SG |
1508 | while (error == TIMER_RETRY) { |
1509 | /* | |
1510 | * We need to handle the case where the timer was or is in |
1511 | * the middle of firing. In all other cases the resources |
1512 | * were already freed. |
1513 | */ | |
1514 | spin_lock_irq(&timer.it_lock); | |
1515 | error = posix_cpu_timer_del(&timer); | |
1516 | spin_unlock_irq(&timer.it_lock); | |
1517 | } | |
1518 | ||
86a9c446 | 1519 | if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) { |
1da177e4 LT |
1520 | /* |
1521 | * It actually did fire already. | |
1522 | */ | |
1523 | return 0; | |
1524 | } | |
1525 | ||
e4b76555 | 1526 | error = -ERESTART_RESTARTBLOCK; |
86a9c446 AV |
1527 | /* |
1528 | * Report back to the user the time still remaining. | |
1529 | */ | |
edbeda46 | 1530 | restart = ¤t->restart_block; |
343d8fc2 | 1531 | restart->nanosleep.expires = expires; |
c0edd7c9 DD |
1532 | if (restart->nanosleep.type != TT_NONE) |
1533 | error = nanosleep_copyout(restart, &it.it_value); | |
e4b76555 TA |
1534 | } |
1535 | ||
1536 | return error; | |
1537 | } | |
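
A userspace counterpart to do_cpu_nanosleep() (again an illustration, not kernel code): a relative clock_nanosleep() on CLOCK_PROCESS_CPUTIME_ID blocks until the whole process has consumed that much more CPU time, so a helper thread must keep the clock advancing or the sleep would never finish. Build with `cc -pthread`:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void *burn(void *arg)
{
	for (;;)
		;	/* keep the process CPU clock ticking */
	return NULL;
}

int main(void)
{
	struct timespec t = { .tv_nsec = 50 * 1000 * 1000 };
	pthread_t tid;
	int err;

	if (pthread_create(&tid, NULL, burn, NULL))
		return 1;
	/* Returns 0 once the process has burned ~50ms more CPU time. */
	err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &t, NULL);
	if (err)
		fprintf(stderr, "clock_nanosleep: %d\n", err);
	else
		puts("woke after 50ms of process CPU time");
	return err != 0;
}
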
1538 | ||
bc2c8ea4 TG |
1539 | static long posix_cpu_nsleep_restart(struct restart_block *restart_block); |
1540 | ||
1541 | static int posix_cpu_nsleep(const clockid_t which_clock, int flags, | |
938e7cf2 | 1542 | const struct timespec64 *rqtp) |
e4b76555 | 1543 | { |
f56141e3 | 1544 | struct restart_block *restart_block = ¤t->restart_block; |
e4b76555 TA |
1545 | int error; |
1546 | ||
1547 | /* | |
1548 | * Diagnose required errors first. | |
1549 | */ | |
1550 | if (CPUCLOCK_PERTHREAD(which_clock) && | |
1551 | (CPUCLOCK_PID(which_clock) == 0 || | |
01a21974 | 1552 | CPUCLOCK_PID(which_clock) == task_pid_vnr(current))) |
e4b76555 TA |
1553 | return -EINVAL; |
1554 | ||
86a9c446 | 1555 | error = do_cpu_nanosleep(which_clock, flags, rqtp); |
e4b76555 TA |
1556 | |
1557 | if (error == -ERESTART_RESTARTBLOCK) { | |
1558 | ||
3751f9f2 | 1559 | if (flags & TIMER_ABSTIME) |
e4b76555 | 1560 | return -ERESTARTNOHAND; |
1da177e4 | 1561 | |
ab8177bc | 1562 | restart_block->nanosleep.clockid = which_clock; |
5abbe51a | 1563 | set_restart_fn(restart_block, posix_cpu_nsleep_restart); |
1da177e4 | 1564 | } |
1da177e4 LT |
1565 | return error; |
1566 | } | |
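
The -EINVAL guard above is why a thread cannot nanosleep on its own CPU-time clock: while blocked in the sleep it accrues no CPU time, so the sleep could never complete. A minimal demonstration using the dynamic per-thread clockid (illustration only; build with `cc -pthread`):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec t = { .tv_sec = 1 };
	clockid_t cid;
	int err;

	pthread_getcpuclockid(pthread_self(), &cid);
	err = clock_nanosleep(cid, 0, &t, NULL);	/* hits the guard */
	printf("clock_nanosleep(own thread clock) = %d (EINVAL = %d)\n",
	       err, EINVAL);
	return 0;
}
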
1567 | ||
bc2c8ea4 | 1568 | static long posix_cpu_nsleep_restart(struct restart_block *restart_block) |
1da177e4 | 1569 | { |
ab8177bc | 1570 | clockid_t which_clock = restart_block->nanosleep.clockid; |
ad196384 | 1571 | struct timespec64 t; |
97735f25 | 1572 | |
ad196384 | 1573 | t = ns_to_timespec64(restart_block->nanosleep.expires); |
97735f25 | 1574 | |
86a9c446 | 1575 | return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t); |
1da177e4 LT |
1576 | } |
1577 | ||
29f1b2b0 ND |
1578 | #define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED) |
1579 | #define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED) | |
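
For readers tracing these macros: make_process_cpuclock()/make_thread_cpuclock() live in include/linux/posix-timers.h and pack a PID and a clock selector into a single clockid_t. Roughly (paraphrased; check the header for the authoritative version):

static inline clockid_t make_process_cpuclock(const unsigned int pid,
					      const clockid_t clock)
{
	/* Upper bits: ~pid; bit 2: per-thread flag; bits 0-1: PROF/VIRT/SCHED. */
	return ((~pid) << 3) | clock;
}

static inline clockid_t make_thread_cpuclock(const unsigned int tid,
					     const clockid_t clock)
{
	return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
}

With pid 0, PROCESS_CLOCK and THREAD_CLOCK therefore always name the calling process or thread.
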
1da177e4 | 1580 | |
a924b04d | 1581 | static int process_cpu_clock_getres(const clockid_t which_clock, |
d2e3e0ca | 1582 | struct timespec64 *tp) |
1da177e4 LT |
1583 | { |
1584 | return posix_cpu_clock_getres(PROCESS_CLOCK, tp); | |
1585 | } | |
a924b04d | 1586 | static int process_cpu_clock_get(const clockid_t which_clock, |
3c9c12f4 | 1587 | struct timespec64 *tp) |
1da177e4 LT |
1588 | { |
1589 | return posix_cpu_clock_get(PROCESS_CLOCK, tp); | |
1590 | } | |
1591 | static int process_cpu_timer_create(struct k_itimer *timer) | |
1592 | { | |
1593 | timer->it_clock = PROCESS_CLOCK; | |
1594 | return posix_cpu_timer_create(timer); | |
1595 | } | |
a924b04d | 1596 | static int process_cpu_nsleep(const clockid_t which_clock, int flags, |
938e7cf2 | 1597 | const struct timespec64 *rqtp) |
1da177e4 | 1598 | { |
99e6c0e6 | 1599 | return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp); |
1da177e4 | 1600 | } |
a924b04d | 1601 | static int thread_cpu_clock_getres(const clockid_t which_clock, |
d2e3e0ca | 1602 | struct timespec64 *tp) |
1da177e4 LT |
1603 | { |
1604 | return posix_cpu_clock_getres(THREAD_CLOCK, tp); | |
1605 | } | |
a924b04d | 1606 | static int thread_cpu_clock_get(const clockid_t which_clock, |
3c9c12f4 | 1607 | struct timespec64 *tp) |
1da177e4 LT |
1608 | { |
1609 | return posix_cpu_clock_get(THREAD_CLOCK, tp); | |
1610 | } | |
1611 | static int thread_cpu_timer_create(struct k_itimer *timer) | |
1612 | { | |
1613 | timer->it_clock = THREAD_CLOCK; | |
1614 | return posix_cpu_timer_create(timer); | |
1615 | } | |
1da177e4 | 1616 | |
d3ba5a9a | 1617 | const struct k_clock clock_posix_cpu = { |
819a95fe AV |
1618 | .clock_getres = posix_cpu_clock_getres, |
1619 | .clock_set = posix_cpu_clock_set, | |
1620 | .clock_get_timespec = posix_cpu_clock_get, | |
1621 | .timer_create = posix_cpu_timer_create, | |
1622 | .nsleep = posix_cpu_nsleep, | |
1623 | .timer_set = posix_cpu_timer_set, | |
1624 | .timer_del = posix_cpu_timer_del, | |
1625 | .timer_get = posix_cpu_timer_get, | |
1626 | .timer_rearm = posix_cpu_timer_rearm, | |
1976945e TG |
1627 | }; |
1628 | ||
d3ba5a9a | 1629 | const struct k_clock clock_process = { |
819a95fe AV |
1630 | .clock_getres = process_cpu_clock_getres, |
1631 | .clock_get_timespec = process_cpu_clock_get, | |
1632 | .timer_create = process_cpu_timer_create, | |
1633 | .nsleep = process_cpu_nsleep, | |
d3ba5a9a | 1634 | }; |
1da177e4 | 1635 | |
d3ba5a9a | 1636 | const struct k_clock clock_thread = { |
819a95fe AV |
1637 | .clock_getres = thread_cpu_clock_getres, |
1638 | .clock_get_timespec = thread_cpu_clock_get, | |
1639 | .timer_create = thread_cpu_timer_create, | |
d3ba5a9a | 1640 | }; |
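
Finally, a userspace view of the clock_process entry points (illustration only; link with -lrt on older glibc): timer_create() on CLOCK_PROCESS_CPUTIME_ID goes through process_cpu_timer_create() above and fires once the process has consumed the requested amount of CPU time:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static volatile sig_atomic_t fired;

static void handler(int sig)
{
	fired = 1;
}

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
	};
	struct itimerspec its = {
		.it_value = { .tv_nsec = 200 * 1000 * 1000 },
	};
	timer_t tid;

	signal(SIGUSR1, handler);
	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid)) {
		perror("timer_create");
		return EXIT_FAILURE;
	}
	if (timer_settime(tid, 0, &its, NULL)) {
		perror("timer_settime");
		return EXIT_FAILURE;
	}
	while (!fired)
		;	/* spin so CPU time actually accrues */
	puts("CPU-time timer fired after 200ms of CPU time");
	timer_delete(tid);
	return 0;
}
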