/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;

static const struct kvm_irq_level default_ptimer_irq = {
        .irq    = 30,
        .level  = 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
        .irq    = 27,
        .level  = 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);

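/*
 * Return the current physical counter value. The guest's view of its
 * virtual counter is derived from this value by subtracting the per-VM
 * CNTVOFF offset.
 */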
u64 kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

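/*
 * Helpers for the hrtimer-based software timers, which track guest timer
 * deadlines whenever the corresponding hardware timer is not in use
 * (blocked vcpu, or the emulated physical timer).
 */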
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
        hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
{
        hrtimer_cancel(hrt);
        if (work)
                cancel_work_sync(work);
}

static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        /*
         * When using a userspace irqchip with the architected timers, we must
         * prevent continuously exiting from the guest, and therefore mask the
         * physical interrupt by disabling it on the host interrupt controller
         * when the virtual level is high, such that the guest can make
         * forward progress. Once we detect the output level being
         * de-asserted, we unmask the interrupt again so that we exit from the
         * guest when the timer fires.
         */
        if (vtimer->irq.level)
                disable_percpu_irq(host_vtimer_irq);
        else
                enable_percpu_irq(host_vtimer_irq, 0);
}

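/*
 * Host-side handler for the virtual timer PPI. It may run after vcpu_put()
 * has cleared this CPU's vcpu pointer (see below), and it only raises the
 * vtimer line when the hardware timer has genuinely expired (enabled,
 * unmasked, and with its status bit set).
 */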
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_context *vtimer;
        u32 cnt_ctl;

        /*
         * We may see a timer interrupt after vcpu_put() has been called which
         * sets the CPU's vcpu pointer to NULL, because even though the timer
         * has been disabled in vtimer_save_state(), the hardware interrupt
         * signal may not have been retired from the interrupt controller yet.
         */
        if (!vcpu)
                return IRQ_HANDLED;

        vtimer = vcpu_vtimer(vcpu);
        if (!vtimer->irq.level) {
                cnt_ctl = read_sysreg_el0(cntv_ctl);
                cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
                           ARCH_TIMER_CTRL_IT_MASK;
                if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
                        kvm_timer_update_irq(vcpu, true, vtimer);
        }

        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                kvm_vtimer_update_mask_user(vcpu);

        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_wake_up(vcpu);
}

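/*
 * Return the number of nanoseconds until timer_ctx fires from the guest's
 * point of view, or 0 if it has already expired.
 */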
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
        u64 cval, now;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        if (now < cval) {
                u64 ns;

                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         cval - now,
                                         timecounter->mask,
                                         &timecounter->frac);
                return ns;
        }

        return 0;
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
        return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
               (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
        u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        if (kvm_timer_irq_can_fire(vtimer))
                min_virt = kvm_timer_compute_delta(vtimer);

        if (kvm_timer_irq_can_fire(ptimer))
                min_phys = kvm_timer_compute_delta(ptimer);

        /* If none of the timers can fire, then return 0 */
        if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
                return 0;

        return min(min_virt, min_phys);
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If we should have slept longer, restart it.
         */
        ns = kvm_timer_earliest_exp(vcpu);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        schedule_work(&timer->expired);
        return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_context *ptimer;
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, phys_timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
        ptimer = vcpu_ptimer(vcpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If not ready, schedule for a later time.
         */
        ns = kvm_timer_compute_delta(ptimer);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        kvm_timer_update_irq(vcpu, true, ptimer);
        return HRTIMER_NORESTART;
}

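/*
 * Returns true when the timer's output line should be asserted: the timer
 * can fire (enabled and unmasked) and its compare value is in the past
 * from the guest's point of view.
 */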
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
        u64 cval, now;

        if (!kvm_timer_irq_can_fire(timer_ctx))
                return false;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        return cval <= now;
}

bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        if (vtimer->irq.level || ptimer->irq.level)
                return true;

        /*
         * When this is called from within the wait loop of kvm_vcpu_block(),
         * the software view of the timer state is up to date (timer->loaded
         * is false), and so we can simply check if the timer should fire now.
         */
        if (!vtimer->loaded && kvm_timer_should_fire(vtimer))
                return true;

        return kvm_timer_should_fire(ptimer);
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the device bitmap with the timer states */
        regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
                                    KVM_ARM_DEV_EL1_PTIMER);
        if (vtimer->irq.level)
                regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
        if (ptimer->irq.level)
                regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

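/*
 * Latch a new output level for a timer and propagate it: to the vgic with
 * an in-kernel irqchip; with a userspace irqchip the level is instead
 * published to userspace via kvm_timer_update_run().
 */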
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx)
{
        int ret;

        timer_ctx->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                                   timer_ctx->irq.level);

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                          timer_ctx->irq.irq,
                                          timer_ctx->irq.level,
                                          timer_ctx);
                WARN_ON(ret);
        }
}

/* Schedule the background timer for the emulated timer. */
static void phys_timer_emulate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /*
         * If the timer can fire now we have just raised the IRQ line and we
         * don't need to have a soft timer scheduled for the future. If the
         * timer cannot fire at all, then we also don't need a soft timer.
         */
        if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
                soft_timer_cancel(&timer->phys_timer, NULL);
                return;
        }

        soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer));
}

/*
 * Check if there was a change in the timer state, so that we should either
 * raise or lower the line level to the GIC or schedule a background timer to
 * emulate the physical timer.
 */
static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        if (unlikely(!timer->enabled))
                return;

        if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
                kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);

        if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
                kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);

        phys_timer_emulate(vcpu);
}

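/*
 * Save the hardware vtimer state into the vcpu's timer context and disable
 * the hardware timer. Runs with interrupts off so that it cannot race with
 * the vtimer PPI handler on this CPU.
 */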
static void vtimer_save_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        unsigned long flags;

        local_irq_save(flags);

        if (!vtimer->loaded)
                goto out;

        if (timer->enabled) {
                vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
                vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
        }

        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
        isb();

        vtimer->loaded = false;
out:
        local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        vtimer_save_state(vcpu);

        /*
         * No need to schedule a background timer if any guest timer has
         * already expired, because kvm_vcpu_block will return before putting
         * the thread to sleep.
         */
        if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
                return;

        /*
         * If neither timer is capable of raising interrupts (both are
         * disabled or masked), then there's no more work for us to do.
         */
        if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
                return;

        /*
         * The guest timers have not yet expired, schedule a background timer.
         * Set the earliest expiration time among the guest timers.
         */
        soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

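/*
 * Mirror of vtimer_save_state(): program the saved context back into the
 * hardware vtimer. CNTV_CVAL is written before CNTV_CTL, with an ISB in
 * between, so the timer is never enabled with a stale compare value.
 */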
static void vtimer_restore_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        unsigned long flags;

        local_irq_save(flags);

        if (vtimer->loaded)
                goto out;

        if (timer->enabled) {
                write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
                isb();
                write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
        }

        vtimer->loaded = true;
out:
        local_irq_restore(flags);
}

void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        vtimer_restore_state(vcpu);

        soft_timer_cancel(&timer->bg_timer, &timer->expired);
}

static void set_cntvoff(u64 cntvoff)
{
        u32 low = lower_32_bits(cntvoff);
        u32 high = upper_32_bits(cntvoff);

        /*
         * Since kvm_call_hyp doesn't fully support the ARM PCS, especially on
         * 32-bit systems, but rather passes register by register shifted one
         * place (we put the function address in r0/x0), we cannot simply pass
         * a 64-bit value as an argument, but have to split the value into two
         * 32-bit halves.
         */
        kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}

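/*
 * Mirror the vtimer's software output level into the active state of the
 * host vtimer PPI before running the guest, so that an already-asserted
 * line does not immediately fire on the host while the guest runs.
 */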
static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        bool phys_active;
        int ret;

        phys_active = vtimer->irq.level ||
                      kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);

        ret = irq_set_irqchip_state(host_vtimer_irq,
                                    IRQCHIP_STATE_ACTIVE,
                                    phys_active);
        WARN_ON(ret);
}

static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
{
        kvm_vtimer_update_mask_user(vcpu);
}

void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        if (unlikely(!timer->enabled))
                return;

        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                kvm_timer_vcpu_load_user(vcpu);
        else
                kvm_timer_vcpu_load_vgic(vcpu);

        set_cntvoff(vtimer->cntvoff);

        vtimer_restore_state(vcpu);

        /* Set the background timer for the physical timer emulation. */
        phys_timer_emulate(vcpu);
}

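/*
 * With a userspace irqchip, compare the current timer output levels with
 * what was last published in the kvm_run device bitmap; a mismatch means
 * userspace needs to be told about the new levels.
 */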
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool vlevel, plevel;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
        plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

        return vtimer->irq.level != vlevel ||
               ptimer->irq.level != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        if (unlikely(!timer->enabled))
                return;

        vtimer_save_state(vcpu);

        /*
         * Cancel the physical timer emulation, because the only case where we
         * need it after a vcpu_put is in the context of a sleeping VCPU, and
         * in that case we already factor in the deadline for the physical
         * timer when scheduling the bg_timer.
         *
         * In any case, we re-schedule the hrtimer for the physical timer when
         * coming back to the VCPU thread in kvm_timer_vcpu_load().
         */
        soft_timer_cancel(&timer->phys_timer, NULL);

        /*
         * The kernel may decide to run userspace after calling vcpu_put, so
         * we reset cntvoff to 0 to ensure a consistent read between user
         * accesses to the virtual counter and kernel access to the physical
         * counter.
         */
        set_cntvoff(0);
}

static void unmask_vtimer_irq(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
                kvm_vtimer_update_mask_user(vcpu);
                return;
        }

        /*
         * If the guest disabled the timer without acking the interrupt, then
         * we must make sure the physical and virtual active states are in
         * sync by deactivating the physical interrupt, because otherwise we
         * wouldn't see the next timer interrupt in the host.
         */
        if (!kvm_vgic_map_is_active(vcpu, vtimer->irq.irq)) {
                int ret;
                ret = irq_set_irqchip_state(host_vtimer_irq,
                                            IRQCHIP_STATE_ACTIVE,
                                            false);
                WARN_ON(ret);
        }
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if any of the timers have expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        /*
         * If we entered the guest with the vtimer output asserted we have to
         * check if the guest has modified the timer so that we should lower
         * the line at this point.
         */
        if (vtimer->irq.level) {
                vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
                vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
                if (!kvm_timer_should_fire(vtimer)) {
                        kvm_timer_update_irq(vcpu, false, vtimer);
                        unmask_vtimer_irq(vcpu);
                }
        }
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7. We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        vtimer->cnt_ctl = 0;
        ptimer->cnt_ctl = 0;
        kvm_timer_update_state(vcpu);

        return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
        int i;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(i, tmp, kvm)
                vcpu_vtimer(tmp)->cntvoff = cntvoff;

        /*
         * When called from the vcpu create path, the CPU being created is not
         * included in the loop above, so we just set it here as well.
         */
        vcpu_vtimer(vcpu)->cntvoff = cntvoff;
        mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /* Synchronize cntvoff across all vtimers of a VM. */
        update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
        vcpu_ptimer(vcpu)->cntvoff = 0;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->bg_timer.function = kvm_bg_timer_expire;

        hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->phys_timer.function = kvm_phys_timer_expire;

        vtimer->irq.irq = default_vtimer_irq.irq;
        ptimer->irq.irq = default_ptimer_irq.irq;
}

static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                vtimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                vtimer->cnt_cval = value;
                break;
        case KVM_REG_ARM_PTIMER_CTL:
                ptimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
                break;
        case KVM_REG_ARM_PTIMER_CVAL:
                ptimer->cnt_cval = value;
                break;
        default:
                return -1;
        }

        kvm_timer_update_state(vcpu);
        return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
        /*
         * Set ISTATUS bit if it's expired.
         * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
         * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
         * regardless of ENABLE bit for our implementation convenience.
         */
        if (!kvm_timer_compute_delta(timer))
                return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
        else
                return timer->cnt_ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return read_timer_ctl(vtimer);
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vtimer->cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return vtimer->cnt_cval;
        case KVM_REG_ARM_PTIMER_CTL:
                return read_timer_ctl(ptimer);
        case KVM_REG_ARM_PTIMER_CVAL:
                return ptimer->cnt_cval;
        case KVM_REG_ARM_PTIMER_CNT:
                return kvm_phys_timer_read();
        }
        return (u64)-1;
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
        kvm_timer_init_interrupt(NULL);
        return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(host_vtimer_irq);
        return 0;
}

int kvm_timer_hyp_init(bool has_gic)
{
        struct arch_timer_kvm_info *info;
        int err;

        info = arch_timer_get_kvm_info();
        timecounter = &info->timecounter;

        if (!timecounter->cc) {
                kvm_err("kvm_arch_timer: uninitialized timecounter\n");
                return -ENODEV;
        }

        if (info->virtual_irq <= 0) {
                kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
                        info->virtual_irq);
                return -ENODEV;
        }
        host_vtimer_irq = info->virtual_irq;

        host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
        if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
            host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
                kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
                        host_vtimer_irq);
                host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
        }

        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        host_vtimer_irq, err);
                return err;
        }

        if (has_gic) {
                err = irq_set_vcpu_affinity(host_vtimer_irq,
                                            kvm_get_running_vcpus());
                if (err) {
                        kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
                        goto out_free_irq;
                }
        }

        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);

        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                          "kvm/arm/timer:starting", kvm_timer_starting_cpu,
                          kvm_timer_dying_cpu);
        return 0;
out_free_irq:
        free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
        return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        soft_timer_cancel(&timer->bg_timer, &timer->expired);
        soft_timer_cancel(&timer->phys_timer, NULL);
        kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
}

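/*
 * Claim ownership of the timer PPIs with the vgic, and check that all
 * vcpus agree on the same vtimer and ptimer interrupt numbers.
 */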
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
        int vtimer_irq, ptimer_irq;
        int i, ret;

        vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
        if (ret)
                return false;

        ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
        if (ret)
                return false;

        kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
                if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
                    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
                        return false;
        }

        return true;
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        int ret;

        if (timer->enabled)
                return 0;

        /* Without a VGIC we do not map virtual IRQs to physical IRQs */
        if (!irqchip_in_kernel(vcpu->kvm))
                goto no_vgic;

        if (!vgic_initialized(vcpu->kvm))
                return -ENODEV;

        if (!timer_irqs_are_valid(vcpu)) {
                kvm_debug("incorrectly configured timer irqs\n");
                return -EINVAL;
        }

        ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
        if (ret)
                return ret;

no_vgic:
        preempt_disable();
        timer->enabled = 1;
        kvm_timer_vcpu_load(vcpu);
        preempt_enable();

        return 0;
}

/*
 * On a VHE system, the physical timer and counter traps for EL0 and EL1
 * accesses only need to be configured once, not on every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which makes those
 * bits have no effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
        /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
        u32 cnthctl_shift = 10;
        u64 val;

        /*
         * Disallow physical timer access for the guest.
         * Physical counter access is allowed.
         */
        val = read_sysreg(cnthctl_el2);
        val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
        val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
        write_sysreg(val, cnthctl_el2);
}

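/* Apply a new vtimer/ptimer PPI pair to every vcpu of the VM. */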
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
                vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
        }
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        int irq;

        if (!irqchip_in_kernel(vcpu->kvm))
                return -EINVAL;

        if (get_user(irq, uaddr))
                return -EFAULT;

        if (!(irq_is_ppi(irq)))
                return -EINVAL;

        if (vcpu->arch.timer_cpu.enabled)
                return -EBUSY;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
                break;
        default:
                return -ENXIO;
        }

        return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *timer;
        int irq;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                timer = vcpu_vtimer(vcpu);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                timer = vcpu_ptimer(vcpu);
                break;
        default:
                return -ENXIO;
        }

        irq = timer->irq.irq;
        return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                return 0;
        }

        return -ENXIO;
}