Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 LT |
2 | * Virtual cpu timer based timer functions. |
3 | * | |
27f6b416 | 4 | * Copyright IBM Corp. 2004, 2012 |
1da177e4 LT |
5 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> |
6 | */ | |
7 | ||
27f6b416 MS |
8 | #include <linux/kernel_stat.h> |
9 | #include <linux/notifier.h> | |
10 | #include <linux/kprobes.h> | |
11 | #include <linux/export.h> | |
1da177e4 | 12 | #include <linux/kernel.h> |
1da177e4 | 13 | #include <linux/timex.h> |
27f6b416 MS |
14 | #include <linux/types.h> |
15 | #include <linux/time.h> | |
39881215 | 16 | #include <linux/cpu.h> |
27f6b416 | 17 | #include <linux/smp.h> |
1da177e4 | 18 | |
5a489b98 | 19 | #include <asm/irq_regs.h> |
76d4e00a | 20 | #include <asm/cputime.h> |
27f6b416 | 21 | #include <asm/vtimer.h> |
d7b250e2 | 22 | #include <asm/irq.h> |
4c1051e3 | 23 | #include "entry.h" |
1da177e4 | 24 | |
/* Forward declaration; handler for expired virtual cpu timers, defined below. */
static void virt_timer_expire(void);

/* Per-cpu idle accounting data, updated in vtime_stop_cpu(). */
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

/* List of armed virtual cpu timers, kept sorted by ascending expiry. */
static LIST_HEAD(virt_timer_list);
/* Protects virt_timer_list and (re)insertion of timers. */
static DEFINE_SPINLOCK(virt_timer_lock);
/* Expiry value of the next timer due (head of the sorted list). */
static atomic64_t virt_timer_current;
/* CPU time consumed since the list base was last rebuilt in virt_timer_expire(). */
static atomic64_t virt_timer_elapsed;
33 | ||
/*
 * Read the current value of the cpu timer.  The timer counts down;
 * callers compute consumed time as old_value - new_value.
 */
static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=m" (timer));
	return timer;
}
41 | ||
/*
 * Load a new value into the cpu timer.  The time consumed since the
 * last update (the timer counts down, so old - new) is charged to the
 * system time of the current context before the new value takes effect.
 */
static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		" stpt %0\n"	/* Store current cpu timer value */
		" spt %1"	/* Set new value imm. afterwards */
		: "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}
53 | ||
27f6b416 MS |
54 | static inline int virt_timer_forward(u64 elapsed) |
55 | { | |
56 | BUG_ON(!irqs_disabled()); | |
57 | ||
58 | if (list_empty(&virt_timer_list)) | |
59 | return 0; | |
60 | elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed); | |
61 | return elapsed >= atomic64_read(&virt_timer_current); | |
62 | } | |
63 | ||
1da177e4 LT |
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 *
 * Returns non-zero when the virtual timer list head may have expired,
 * i.e. the caller should run virt_timer_expire().
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		" stpt %0\n"	/* Store current cpu timer value */
		" stck %1"	/* Store current tod clock value */
		: "=m" (S390_lowcore.last_update_timer),
		"=m" (S390_lowcore.last_update_clock));
	/* The cpu timer counts down, so old - new is the time consumed. */
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	/* The tod clock counts up; the full wall-clock delta is first
	 * treated as steal time and reduced by user/system time below. */
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	/* User time accrued since the last accounting pass for this task. */
	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	/* System time accrued since the last accounting pass. */
	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	/* Whatever wall-clock time remains unaccounted was stolen. */
	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}

	return virt_timer_forward(user + system);
}
101 | ||
/*
 * Finish cpu time accounting for the outgoing task and switch the
 * per-cpu lowcore accounting base over to the incoming task.
 */
void vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *ti;

	/* Account everything accrued so far to the outgoing task ... */
	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	/* ... save its accounting snapshot in its thread_info ... */
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	/* ... and restore the snapshot of the task we switch to. */
	ti = task_thread_info(current);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}
1f1c12af | 114 | |
aa5e97ce MS |
115 | void account_process_tick(struct task_struct *tsk, int user_tick) |
116 | { | |
27f6b416 MS |
117 | if (do_account_vtime(tsk, HARDIRQ_OFFSET)) |
118 | virt_timer_expire(); | |
1f1c12af MS |
119 | } |
120 | ||
1da177e4 LT |
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void vtime_account(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, system;

	/* Take a fresh cpu timer sample; the timer counts down, so
	 * old - new is the system time consumed since the last sample. */
	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	/* System time accrued since the last accounting pass. */
	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);

	/* Push the elapsed time into the virtual timer bookkeeping. */
	virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account);
bf9fae9f | 141 | EXPORT_SYMBOL_GPL(vtime_account); |
1da177e4 | 142 | |
/* On s390 system time is accounted eagerly, so vtime_account_system
 * is simply an alias for vtime_account. */
void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account")));
EXPORT_SYMBOL_GPL(vtime_account_system);
11113334 | 146 | |
/*
 * Enter an enabled wait psw to idle the cpu and, once woken by an
 * interrupt, account the time spent waiting as idle time.
 */
void __kprobes vtime_stop_cpu(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask;

	trace_hardirqs_on();
	/* Don't trace preempt off for idle. */
	stop_critical_timings();

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	idle->nohz_delay = 0;

	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);

	/* Reenable preemption tracer. */
	start_critical_timings();

	/* Account time spent with enabled wait psw loaded as idle time. */
	/* Seqcount-style write side: an odd sequence value marks the idle
	 * data as being updated; readers in s390_get_idle_time() retry. */
	idle->sequence++;
	smp_wmb();
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(idle_time);
	smp_wmb();
	idle->sequence++;
}
179 | ||
e1c80530 MS |
/*
 * Return the time the given cpu has spent in its current idle period,
 * or 0 if it is not idle.  Uses a seqcount-style retry loop against
 * the writer in vtime_stop_cpu(): retry while the sequence is odd or
 * changed during the read.
 */
cputime64_t s390_get_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	/* Idle iff clock_idle_enter is set; if the cpu has not left idle
	 * yet (clock_idle_exit == 0), measure up to 'now'. */
	return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
194 | ||
1da177e4 LT |
195 | /* |
196 | * Sorted add to a list. List is linear searched until first bigger | |
197 | * element is found. | |
198 | */ | |
199 | static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) | |
200 | { | |
27f6b416 | 201 | struct vtimer_list *tmp; |
1da177e4 | 202 | |
27f6b416 MS |
203 | list_for_each_entry(tmp, head, entry) { |
204 | if (tmp->expires > timer->expires) { | |
205 | list_add_tail(&timer->entry, &tmp->entry); | |
1da177e4 LT |
206 | return; |
207 | } | |
208 | } | |
209 | list_add_tail(&timer->entry, head); | |
210 | } | |
211 | ||
/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			/* rebase the remaining timers to the new epoch */
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		/* List is sorted, so the first entry is the next to expire. */
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		/* Callbacks run without virt_timer_lock held. */
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}
253 | ||
254 | void init_virt_timer(struct vtimer_list *timer) | |
255 | { | |
1da177e4 LT |
256 | timer->function = NULL; |
257 | INIT_LIST_HEAD(&timer->entry); | |
1da177e4 LT |
258 | } |
259 | EXPORT_SYMBOL(init_virt_timer); | |
260 | ||
1da177e4 LT |
261 | static inline int vtimer_pending(struct vtimer_list *timer) |
262 | { | |
27f6b416 | 263 | return !list_empty(&timer->entry); |
1da177e4 LT |
264 | } |
265 | ||
1da177e4 LT |
/*
 * Queue a timer on the virtual timer list.  Callers hold
 * virt_timer_lock with interrupts disabled (see __add_vtimer and
 * __mod_vtimer).
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}
284 | ||
27f6b416 | 285 | static void __add_vtimer(struct vtimer_list *timer, int periodic) |
1da177e4 | 286 | { |
27f6b416 MS |
287 | unsigned long flags; |
288 | ||
289 | timer->interval = periodic ? timer->expires : 0; | |
290 | spin_lock_irqsave(&virt_timer_lock, flags); | |
291 | internal_add_vtimer(timer); | |
292 | spin_unlock_irqrestore(&virt_timer_lock, flags); | |
1da177e4 LT |
293 | } |
294 | ||
/*
 * add_virt_timer - add an oneshot virtual CPU timer
 *
 * The timer fires once after timer->expires of cpu time and is not
 * requeued (its interval is cleared by __add_vtimer).
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);
303 | ||
/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 *
 * The timer fires every timer->expires of cpu time; it is recharged
 * from timer->interval in virt_timer_expire().
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
312 | ||
/*
 * Change the expiry value of a timer and (re)queue it.
 * Returns 1 if a pending timer was modified, 0 otherwise.
 */
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	/* Fast path: pending timer already has the requested expiry.
	 * NOTE(review): this check runs without virt_timer_lock -
	 * presumably callers serialize mod/del of the same timer;
	 * confirm against the call sites. */
	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}
b6ecfa92 JG |
332 | |
/*
 * Modify a timer as a oneshot timer (interval cleared).
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);
341 | ||
/*
 * Modify a timer as a periodic timer (interval set to the new expiry).
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);
350 | ||
/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	/* NOTE(review): the pending check runs without the lock -
	 * presumably callers serialize del against concurrent add/mod
	 * of the same timer; confirm against the call sites. */
	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);
368 | ||
/*
 * Start the virtual CPU timer on the current CPU.
 */
void __cpuinit init_cpu_vtimer(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
}
377 | ||
39881215 HC |
378 | static int __cpuinit s390_nohz_notify(struct notifier_block *self, |
379 | unsigned long action, void *hcpu) | |
380 | { | |
381 | struct s390_idle_data *idle; | |
382 | long cpu = (long) hcpu; | |
383 | ||
384 | idle = &per_cpu(s390_idle, cpu); | |
1c725922 | 385 | switch (action & ~CPU_TASKS_FROZEN) { |
39881215 | 386 | case CPU_DYING: |
39881215 HC |
387 | idle->nohz_delay = 0; |
388 | default: | |
389 | break; | |
390 | } | |
391 | return NOTIFY_OK; | |
392 | } | |
393 | ||
1da177e4 LT |
/*
 * Boot-time setup of the cpu timer based accounting.
 */
void __init vtime_init(void)
{
	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
	/* Register the cpu hotplug notifier for the nohz delay flag. */
	cpu_notifier(s390_nohz_notify, 0);
}