// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>

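/* Current guest time: host cycle count plus the VM-wide time delta. */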
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}

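/*
 * Convert the distance from current guest time to @cycles into
 * nanoseconds using the clocksource mult/shift pair. Returns 0 if
 * @cycles is already in the past.
 */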
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}

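/*
 * hrtimer expiry callback. If guest time has not yet reached the
 * programmed comparator, push the expiry forward and keep the timer
 * running; otherwise inject the VS-level timer interrupt.
 */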
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}

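/* Cancel a pending timer event; fails if the timer was never initialized or is not armed. */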
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}

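/*
 * Program the next timer event at @ncycles of guest time: clear any
 * pending timer interrupt and arm the hrtimer for the remaining
 * nanoseconds.
 */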
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}

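/* KVM_GET_ONE_REG handler for the RISC-V timer register group. */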
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

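/*
 * KVM_SET_ONE_REG handler for the RISC-V timer register group. The
 * timebase frequency is fixed by the host; writing the time register
 * adjusts the VM-wide time delta rather than the host counter.
 */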
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		ret = -EOPNOTSUPP;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

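/* One-time per-VCPU hrtimer setup. */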
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
	t->init_done = true;
	t->next_set = false;

	return 0;
}

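/* Tear down the per-VCPU timer, cancelling any pending event. */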
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

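/* On VCPU reset, simply cancel any pending timer event. */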
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

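/*
 * Make the VM-wide time delta visible to the guest through the
 * htimedelta CSR (split across htimedelta/htimedeltah on RV32).
 */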
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#ifdef CONFIG_64BIT
	csr_write(CSR_HTIMEDELTA, gt->time_delta);
#else
	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#endif
}

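/*
 * Per-VM timer setup: cache the clocksource mult/shift pair for
 * cycles-to-ns conversion and start guest time at roughly zero.
 */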
int kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();

	return 0;
}