// SPDX-License-Identifier: GPL-2.0-only

/* hypercalls: Check the ARM64's pseudo-firmware bitmap register interface.
 *
 * The test validates the basic hypercall functionalities that are exposed
 * via the pseudo-firmware bitmap registers. This includes the registers'
 * read/write behavior before and after the VM has started, and whether the
 * hypercalls are properly masked or unmasked to the guest when disabled or
 * enabled from the KVM userspace, respectively.
 */

#include <errno.h>
#include <linux/arm-smccc.h>
#include <asm/kvm.h>
#include <kvm_util.h>

#include "processor.h"

#define FW_REG_ULIMIT_VAL(max_feat_bit)	(GENMASK(max_feat_bit, 0))

/* Last valid bits of the bitmapped firmware registers */
#define KVM_REG_ARM_STD_BMAP_BIT_MAX		0
#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX	0
#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX	1

struct kvm_fw_reg_info {
	uint64_t reg;		/* Register definition */
	uint64_t max_feat_bit;	/* Bit that represents the upper limit of the feature-map */
};

#define FW_REG_INFO(r)			\
	{				\
		.reg = r,			\
		.max_feat_bit = r##_BIT_MAX,	\
	}

static const struct kvm_fw_reg_info fw_reg_info[] = {
	FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
	FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
	FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
};

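/*
 * Stages of the test. The register interface stage is driven entirely from
 * the host, while the guest issues the hypercalls for the stages that
 * exercise the HVC interface (features disabled, features enabled, and
 * bogus arguments).
 */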
enum test_stage {
	TEST_STAGE_REG_IFACE,
	TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
	TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
	TEST_STAGE_HVC_IFACE_FALSE_INFO,
	TEST_STAGE_END,
};

static int stage = TEST_STAGE_REG_IFACE;

struct test_hvc_info {
	uint32_t func_id;
	uint64_t arg1;
};

#define TEST_HVC_INFO(f, a1)	\
	{			\
		.func_id = f,	\
		.arg1 = a1,	\
	}

static const struct test_hvc_info hvc_info[] = {
	/* KVM_REG_ARM_STD_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),

	/* KVM_REG_ARM_STD_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),

	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
			ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
};

/* Feed false hypercall info to test the KVM behavior */
static const struct test_hvc_info false_hvc_info[] = {
	/* Feature support check against a different family of hypercalls */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
};

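/*
 * Issue every hypercall in the given table and check the SMCCC return value
 * against the expectation for the current stage: SMCCC_RET_NOT_SUPPORTED when
 * the features are disabled (or the arguments are bogus), and anything other
 * than that error once the features are enabled.
 */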
static void guest_test_hvc(const struct test_hvc_info *hc_info)
{
	unsigned int i;
	struct arm_smccc_res res;
	unsigned int hvc_info_arr_sz;

	hvc_info_arr_sz =
	hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);

	for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
		memset(&res, 0, sizeof(res));
		smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);

		switch (stage) {
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED,
					res.a0, hc_info->func_id, hc_info->arg1);
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED,
					res.a0, hc_info->func_id, hc_info->arg1);
			break;
		default:
			GUEST_ASSERT_1(0, stage);
		}
	}
}

static void guest_code(void)
{
	while (stage != TEST_STAGE_END) {
		switch (stage) {
		case TEST_STAGE_REG_IFACE:
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			guest_test_hvc(hvc_info);
			break;
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			guest_test_hvc(false_hvc_info);
			break;
		default:
			GUEST_ASSERT_1(0, stage);
		}

		GUEST_SYNC(stage);
	}

	GUEST_DONE();
}

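/*
 * Minimal view of the PV steal-time structure; it is only used here to size
 * the guest memory that backs the steal-time region.
 */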
struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)
#define ST_GPA_BASE		(1 << 30)

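/*
 * Back the steal-time region with an anonymous memslot at ST_GPA_BASE and set
 * the vCPU's KVM_ARM_VCPU_PVTIME_IPA attribute to point at it.
 */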
static void steal_time_init(struct kvm_vcpu *vcpu)
{
	uint64_t st_ipa = (ulong)ST_GPA_BASE;
	unsigned int gpages;

	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
	vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);

	vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
			     KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
}

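/*
 * Before the first KVM_RUN, each bitmap register must default to exposing all
 * of its supported features, accept writes that clear them, and reject an
 * attempt to set a bit beyond the supported range with EINVAL.
 */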
static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];

		/* First 'read' should be an upper limit of the features supported */
		vcpu_get_reg(vcpu, reg_info->reg, &val);
		TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
			"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
			reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);

		/* Test a 'write' by disabling all the features of the register map */
		ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
		TEST_ASSERT(ret == 0,
			"Failed to clear all the features of reg: 0x%lx; errno: %d\n",
			reg_info->reg, errno);

		vcpu_get_reg(vcpu, reg_info->reg, &val);
		TEST_ASSERT(val == 0,
			"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);

		/*
		 * Test enabling a feature that's not supported.
		 * Avoid this check if all the bits are occupied.
		 */
		if (reg_info->max_feat_bit < 63) {
			ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
			TEST_ASSERT(ret != 0 && errno == EINVAL,
			"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
			errno, reg_info->reg);
		}
	}
}

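/*
 * Once the VM has run, the registers must still hold the values written
 * before the run (all features cleared here), and any further write must be
 * rejected with EBUSY.
 */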
static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];

		/*
		 * Before starting the VM, the test clears all the bits.
		 * Check if that's still the case.
		 */
		vcpu_get_reg(vcpu, reg_info->reg, &val);
		TEST_ASSERT(val == 0,
			"Expected all the features to be cleared for reg: 0x%lx\n",
			reg_info->reg);

		/*
		 * Since the VM has run at least once, KVM shouldn't allow modification of
		 * the registers and should return EBUSY. Set the registers and check for
		 * the expected errno.
		 */
		ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
		TEST_ASSERT(ret != 0 && errno == EBUSY,
			"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
			errno, reg_info->reg);
	}
}

static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
{
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(vcpu, guest_code);

	ucall_init(vm, NULL);
	steal_time_init(*vcpu);

	return vm;
}

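/*
 * Handle a guest sync: bump the stage, and depending on the stage that just
 * completed, either validate the register interface after the first run or
 * recreate the VM so that all the features are enabled by default again.
 */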
static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
{
	int prev_stage = stage;

	pr_debug("Stage: %d\n", prev_stage);

	/* Sync the stage early, the VM might be freed below. */
	stage++;
	sync_global_to_guest(*vm, stage);

	switch (prev_stage) {
	case TEST_STAGE_REG_IFACE:
		test_fw_regs_after_vm_start(*vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		/* Start a new VM so that all the features are now enabled by default */
		kvm_vm_free(*vm);
		*vm = test_vm_create(vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
	case TEST_STAGE_HVC_IFACE_FALSE_INFO:
		break;
	default:
		TEST_FAIL("Unknown test stage: %d\n", prev_stage);
	}
}

static void test_run(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	bool guest_done = false;

	vm = test_vm_create(&vcpu);

	test_fw_regs_before_vm_start(vcpu);

	while (!guest_done) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			test_guest_stage(&vm, &vcpu);
			break;
		case UCALL_DONE:
			guest_done = true;
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_N(uc, "values: 0x%lx, 0x%lx; 0x%lx, stage: %u",
					      GUEST_ASSERT_ARG(uc, 0),
					      GUEST_ASSERT_ARG(uc, 1),
					      GUEST_ASSERT_ARG(uc, 2), stage);
			break;
		default:
			TEST_FAIL("Unexpected guest exit\n");
		}
	}

	kvm_vm_free(vm);
}

int main(void)
{
	setbuf(stdout, NULL);

	test_run();
	return 0;
}