// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

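/*
 * Accessors for the shadow timer state. Each timer context is backed by
 * the vcpu's sysreg file; these helpers pick the right register depending
 * on whether the context is the virtual or the physical timer.
 */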
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
	default:
		return 0;
	}
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
		break;
	default:
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
	}
}

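/* Read the current physical counter value via the host timecounter. */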
u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

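/*
 * Describe which timers are backed by hardware and which are emulated:
 * with VHE both EL1 timers are used directly, while without VHE only the
 * virtual timer is direct and the physical timer is emulated in software.
 */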
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
	       unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

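/*
 * Handler for the host's per-cpu vtimer/ptimer IRQs, which fire while a
 * guest timer is loaded on the hardware.
 */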
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

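/*
 * Return the number of nanoseconds until the counter of @timer_ctx
 * reaches @val, or 0 if that point is already in the past.
 */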
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
	       ((timer_get_ctl(timer_ctx) &
		 (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}

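/*
 * Compute the remaining delay of a trapped WFIT instruction, whose
 * deadline (against the virtual counter) is held in the Rt register.
 */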
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

	return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

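/*
 * Expiry handler for the background hrtimer that stands in for the guest
 * timers while the vcpu is blocked.
 */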
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

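/*
 * Compute the timer's output level. If the context is loaded on the CPU,
 * read the hardware control register directly; otherwise derive the level
 * from the shadow state.
 */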
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		       (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

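/*
 * Sync the hardware timer state back into the shadow registers and disable
 * the hardware timer, marking the context as no longer loaded.
 */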
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		break;
	case TIMER_PTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

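/* Program the shadow state of a direct timer onto the hardware timer. */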
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;

	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

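/*
 * Load the guest timer state on vcpu_load: reflect the expected interrupt
 * state into the GIC (or mask the host IRQ without one), program CNTVOFF,
 * and restore the directly-used timer registers.
 */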
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	set_cntvoff(timer_get_offset(map.direct_vtimer));

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

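/*
 * Mirror image of kvm_timer_vcpu_load(): save the direct timers, cancel
 * any pending software emulation, and hand over to the background timer
 * if the vcpu is blocking.
 */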
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);

	/*
	 * The kernel may decide to run userspace after calling vcpu_put, so
	 * we reset cntvoff to 0 to ensure a consistent read between user
	 * accesses to the virtual counter and kernel access to the physical
	 * counter in the non-VHE case. For VHE, the virtual counter uses a
	 * fixed virtual offset of zero, so there is no need to zero the
	 * CNTVOFF_EL2 register.
	 */
	set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer_set_ctl(vcpu_vtimer(vcpu), 0);
	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	unsigned long i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		timer_set_offset(vcpu_vtimer(tmp), cntvoff);

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
	mutex_unlock(&kvm->lock);
}

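/*
 * Set up the per-vcpu timer contexts: back-pointers, default PPI numbers,
 * emulation hrtimers, and a cntvoff that makes the virtual counter read
 * (roughly) zero at vcpu creation time.
 */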
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set the ISTATUS bit if the timer has expired.
	 * Note that according to the ARMv8 ARM (Issue A.k), the ISTATUS bit
	 * is UNKNOWN when the ENABLE bit is 0, so we choose to set the
	 * ISTATUS bit regardless of the ENABLE bit, for implementation
	 * convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

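/*
 * Read a timer register from the shadow state; TVAL is synthesized from
 * CVAL and the current (offset-adjusted) counter value.
 */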
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
		BUG();
	}

	return val;
}

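/*
 * Sysreg trap accessors: momentarily put the timer so that the shadow
 * registers are up to date, perform the access, then reload the timer.
 */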
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();
}

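/*
 * CPU hotplug callbacks: enable the host timer IRQs when a CPU comes up,
 * and mask the vtimer IRQ when it goes down.
 */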
static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}

static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

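/*
 * irq_chip stacked on top of the host GIC for the timer IRQs when the GIC
 * offers no HW deactivation, letting KVM resample the lines in software
 * (see kvm_irq_init()).
 */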
static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}

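/*
 * Discover the host timer IRQs and, if the GIC cannot deactivate
 * interrupts in hardware, interpose the stacked irqchip defined above.
 */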
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}

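/*
 * One-time hypervisor-side init: grab the timecounter, request the
 * per-cpu vtimer/ptimer IRQs and register the CPU hotplug callbacks.
 */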
int kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

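/*
 * Claim ownership of the timer PPIs in the vgic and check that all vcpus
 * agree on the same vtimer/ptimer interrupt numbers.
 */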
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq, ret;
	unsigned long i;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

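/*
 * Final enablement of the timer: with an in-kernel irqchip, validate the
 * configured PPIs and map the direct timers' host IRQs to their virtual
 * counterparts.
 */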
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, so these bits have no
 * effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}