1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Test TEST PROTECTION emulation.
4 *
5 * Copyright IBM Corp. 2021
6 */
7
8 #include <sys/mman.h>
9 #include "test_util.h"
10 #include "kvm_util.h"
11 #include "kselftest.h"
12
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
/*
 * Control-register-0 override bits, written with s390 big-endian bit
 * numbering (bit 0 = MSB), hence the (63 - n) shifts.
 */
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))

/*
 * Two page-aligned guest pages used as test targets: one gets a plain
 * storage key, the other additionally gets fetch protection (see guest_code).
 */
static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];
21
/*
 * Set the storage key of the page containing @addr to @key.
 *
 * LRA first translates @addr; on translation failure (nonzero condition
 * code) the function bails out without touching the key. Only the SSKE
 * path runs when the jz branch (cc == 0) is taken.
 *
 * Nonzero (negative) return value indicates that address not mapped.
 */
static int set_storage_key(void *addr, uint8_t key)
{
	int not_mapped = 0;

	asm volatile (
		       "lra	%[addr], 0(0,%[addr])\n"
		"	jz	0f\n"
		"	llill	%[not_mapped],1\n"
		"	j	1f\n"
		"0:	sske	%[key], %[addr]\n"
		"1:"
		: [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
		: [key] "r" (key)
		: "cc"
	);
	return -not_mapped;
}
40
/*
 * Access-permission outcomes of TEST PROTECTION. The numeric values
 * match the TPROT condition codes as extracted in test_protection()
 * (mask >> 28), so do not reorder.
 */
enum permission {
	READ_WRITE = 0,
	READ = 1,
	RW_PROTECTED = 2,
	TRANSL_UNAVAIL = 3,
};
47
/*
 * Execute TEST PROTECTION on @addr with access key @key and return the
 * resulting permission.
 *
 * IPM copies the condition code into the register such that the shift by
 * 28 yields the raw cc value, which maps 1:1 onto enum permission.
 * The "Q" constraint makes @addr a memory operand so the compiler knows
 * the page is inspected.
 */
static enum permission test_protection(void *addr, uint8_t key)
{
	uint64_t mask;

	asm volatile (
		       "tprot	%[addr], 0(%[key])\n"
		"	ipm	%[mask]\n"
		: [mask] "=r" (mask)
		: [addr] "Q" (*(char *)addr),
		  [key] "a" (key)
		: "cc"
	);

	return (enum permission)(mask >> 28);
}
63
/*
 * Guest/host synchronization points, also used as the TAP test names.
 * STAGE_* entries are setup steps, TEST_* entries run the assertions
 * for the table rows tagged with that stage.
 */
enum stage {
	STAGE_INIT_SIMPLE,
	TEST_SIMPLE,
	STAGE_INIT_FETCH_PROT_OVERRIDE,
	TEST_FETCH_PROT_OVERRIDE,
	TEST_STORAGE_PROT_OVERRIDE,
	STAGE_END /* must be the last entry (it's the amount of tests) */
};
72
struct test {
	enum stage stage;	/* stage during which this row is checked */
	void *addr;		/* guest address handed to TPROT */
	uint8_t key;		/* access key in the high nibble (storage-key format) */
	enum permission expected;	/* expected TPROT outcome */
} tests[] = {
	/*
	 * We perform each test in the array by executing TEST PROTECTION on
	 * the specified addr with the specified key and checking if the returned
	 * permissions match the expected value.
	 * Both guest and host cooperate to set up the required test conditions.
	 * A central condition is that the page targeted by addr has to be DAT
	 * protected in the host mappings, in order for KVM to emulate the
	 * TEST PROTECTION instruction.
	 * Since the page tables are shared, the host uses mprotect to achieve
	 * this.
	 *
	 * Test resulting in RW_PROTECTED/TRANSL_UNAVAIL will be interpreted
	 * by SIE, not KVM, but there is no harm in testing them also.
	 * See Enhanced Suppression-on-Protection Facilities in the
	 * Interpretive-Execution Mode
	 */
	/*
	 * guest: set storage key of page_store_prot to 1
	 *        storage key of page_fetch_prot to 9 and enable
	 *        protection for it
	 * STAGE_INIT_SIMPLE
	 * host: write protect both via mprotect
	 */
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
	/* mismatched keys, but no fetch protection -> RO */
	{ TEST_SIMPLE, page_store_prot, 0x20, READ },
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
	/* mismatched keys, fetch protection -> inaccessible */
	{ TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
	/* page 0 not mapped yet -> translation not available */
	{ TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
	/*
	 * host: try to map page 0
	 * guest: set storage key of page 0 to 9 and enable fetch protection
	 * STAGE_INIT_FETCH_PROT_OVERRIDE
	 * host: write protect page 0
	 *       enable fetch protection override
	 */
	/* mismatched keys, fetch protection, but override applies -> RO */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
	/* mismatched keys, fetch protection, override applies to 0-2048 only -> inaccessible */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
	/*
	 * host: enable storage protection override
	 */
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
	/* mismatched keys, no fetch protection, override doesn't apply -> RO */
	{ TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
	/* end marker */
	{ STAGE_END, 0, 0, 0 },
};
139
/*
 * Run every consecutive tests[] row belonging to the current stage,
 * advancing *i past them, and return that stage so the guest can sync it.
 *
 * Rows that require page 0 to be mapped (low addresses expecting a result
 * other than TRANSL_UNAVAIL) are silently skipped when @mapped_0 is false:
 * the host's vm_vaddr_alloc may have placed the page elsewhere, and the
 * guest is the one that can detect this.
 */
static enum stage perform_next_stage(int *i, bool mapped_0)
{
	enum stage cur_stage = tests[*i].stage;

	while (tests[*i].stage == cur_stage) {
		const struct test *t = &tests[*i];
		bool needs_page_0 = t->addr < (void *)4096 &&
				    t->expected != TRANSL_UNAVAIL;

		if (mapped_0 || !needs_page_0) {
			enum permission result;

			result = test_protection(t->addr, t->key);
			GUEST_ASSERT_2(result == t->expected, *i, result);
		}
		(*i)++;
	}
	return cur_stage;
}
164
guest_code(void)165 static void guest_code(void)
166 {
167 bool mapped_0;
168 int i = 0;
169
170 GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
171 GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
172 GUEST_SYNC(STAGE_INIT_SIMPLE);
173 GUEST_SYNC(perform_next_stage(&i, false));
174
175 /* Fetch-protection override */
176 mapped_0 = !set_storage_key((void *)0, 0x98);
177 GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
178 GUEST_SYNC(perform_next_stage(&i, mapped_0));
179
180 /* Storage-protection override */
181 GUEST_SYNC(perform_next_stage(&i, mapped_0));
182 }
183
/*
 * Run the vcpu until its next ucall and verify the guest synced at the
 * expected stage; a guest-side assertion failure is reported instead.
 * Unlike HOST_SYNC, no TAP result line is emitted (used when the stage
 * outcome is reported as a skip instead of a pass).
 */
#define HOST_SYNC_NO_TAP(vcpup, stage)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpup);			\
	struct ucall uc;					\
	int __stage = (stage);					\
								\
	vcpu_run(__vcpu);					\
	get_ucall(__vcpu, &uc);					\
	if (uc.cmd == UCALL_ABORT)				\
		REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu");	\
	ASSERT_EQ(uc.cmd, UCALL_SYNC);				\
	ASSERT_EQ(uc.args[1], __stage);				\
})
197
/* Like HOST_SYNC_NO_TAP, but also reports a TAP pass named after @stage */
#define HOST_SYNC(vcpu, stage)			\
({						\
	HOST_SYNC_NO_TAP(vcpu, stage);		\
	ksft_test_result_pass("" #stage "\n");	\
})
203
main(int argc,char * argv[])204 int main(int argc, char *argv[])
205 {
206 struct kvm_vcpu *vcpu;
207 struct kvm_vm *vm;
208 struct kvm_run *run;
209 vm_vaddr_t guest_0_page;
210
211 ksft_print_header();
212 ksft_set_plan(STAGE_END);
213
214 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
215 run = vcpu->run;
216
217 HOST_SYNC(vcpu, STAGE_INIT_SIMPLE);
218 mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
219 HOST_SYNC(vcpu, TEST_SIMPLE);
220
221 guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
222 if (guest_0_page != 0) {
223 /* Use NO_TAP so we don't get a PASS print */
224 HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
225 ksft_test_result_skip("STAGE_INIT_FETCH_PROT_OVERRIDE - "
226 "Did not allocate page at 0\n");
227 } else {
228 HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
229 }
230 if (guest_0_page == 0)
231 mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
232 run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
233 run->kvm_dirty_regs = KVM_SYNC_CRS;
234 HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);
235
236 run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
237 run->kvm_dirty_regs = KVM_SYNC_CRS;
238 HOST_SYNC(vcpu, TEST_STORAGE_PROT_OVERRIDE);
239
240 kvm_vm_free(vm);
241
242 ksft_finished(); /* Print results and exit() accordingly */
243 }
244