// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

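/*
 * Default PPI numbers for the guest timers: 27 is the usual GIC
 * assignment for the virtual timer and 30 for the non-secure EL1
 * physical timer. Userspace may override these via the
 * KVM_ARM_VCPU_TIMER_IRQ_{V,P}TIMER attributes below.
 */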
static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

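/*
 * Accessors for the guest view of the timer registers: CTL and CVAL
 * are kept in the vcpu's sys_regs file, selected by the context's
 * timer index (virtual or physical).
 */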
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	if (ctxt->offset.vm_offset)
		return *ctxt->offset.vm_offset;

	return 0;
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	if (!ctxt->offset.vm_offset) {
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
		return;
	}

	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

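/*
 * Describe how each guest timer is backed on this host: with VHE both
 * EL1 timers are mapped directly onto hardware, while without VHE only
 * the virtual timer is direct and the EL1 physical timer is emulated
 * via a hrtimer.
 */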
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
	       unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

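/*
 * Convert the distance between the current counter value and a compare
 * value into a host nanosecond delay, returning 0 when the deadline
 * has already passed.
 */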
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
	       ((timer_get_ctl(timer_ctx) &
		 (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

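/*
 * WFIT support: a vcpu executing WFIT supplies its own deadline in the
 * Xt register operand of the instruction, which is treated as an
 * additional timer when working out how long the vcpu may sleep.
 */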
static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}

static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

	return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
			(cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

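/*
 * Save a directly mapped timer: snapshot CTL/CVAL from the hardware
 * into the vcpu context and disable the timer on this CPU, so the
 * guest state survives vcpu_put().
 */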
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		/*
		 * The kernel may decide to run userspace after
		 * calling vcpu_put, so we reset cntvoff to 0 to
		 * ensure a consistent read between user accesses to
		 * the virtual counter and kernel access to the
		 * physical counter in the non-VHE case.
		 *
		 * For VHE, the virtual counter uses a fixed virtual
		 * offset of zero, so no need to zero the CNTVOFF_EL2
		 * register, but this is actually useful when switching
		 * between EL1/vEL2 with NV.
		 *
		 * Do it unconditionally, as this is either unavoidable
		 * or dirt cheap.
		 */
		set_cntvoff(0);
		break;
	case TIMER_PTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

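/*
 * Counterpart to timer_save_state(): reprogram the hardware timer
 * from the vcpu context when the timer is (re)loaded onto a CPU.
 */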
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		set_cntvoff(timer_get_offset(ctx));
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

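/*
 * vcpu_load hook: prime the GIC active state (or the host IRQ masking
 * fallback), cancel the blocking background timer, restore the
 * directly mapped timers into hardware and kick emulation for the
 * remaining one.
 */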
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer_set_ctl(vcpu_vtimer(vcpu), 0);
	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	vtimer->vcpu = vcpu;
	vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
	ptimer->vcpu = vcpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	timer_set_offset(vtimer, kvm_phys_timer_read());
	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

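/* CPU hotplug hooks: (un)mask the per-CPU timer PPIs on this CPU. */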
void kvm_timer_cpu_up(void)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	if (host_ptimer_irq)
		enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

void kvm_timer_cpu_down(void)
{
	disable_percpu_irq(host_vtimer_irq);
	if (host_ptimer_irq)
		disable_percpu_irq(host_ptimer_irq);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		timer_set_offset(timer, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
		BUG();
	}

	return val;
}

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	struct arch_timer_context *timer;
	struct timer_map map;
	u64 val;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);

	if (timer == map.emul_ptimer)
		return kvm_arm_timer_read(vcpu, timer, treg);

	preempt_disable();
	timer_save_state(timer);

	val = kvm_arm_timer_read(vcpu, timer, treg);

	timer_restore_state(timer);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	struct arch_timer_context *timer;
	struct timer_map map;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);
	if (timer == map.emul_ptimer) {
		soft_timer_cancel(&timer->hrtimer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_emulate(timer);
	} else {
		preempt_disable();
		timer_save_state(timer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_restore_state(timer);
		preempt_enable();
	}
}

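/*
 * Host irqchip plumbing used when the GIC cannot deactivate the timer
 * interrupt on the guest's behalf: while an interrupt is forwarded to
 * a vcpu, its "active" state is emulated by masking/unmasking the
 * parent interrupt instead.
 */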
static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}

static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}

int __init kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq, ret;
	unsigned long i;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, and this makes those
 * bits have no effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}