1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #ifndef __SELFTEST_IOMMUFD_UTILS
4 #define __SELFTEST_IOMMUFD_UTILS
5
6 #include <unistd.h>
7 #include <stddef.h>
8 #include <sys/fcntl.h>
9 #include <sys/ioctl.h>
10 #include <stdint.h>
11 #include <assert.h>
12
13 #include "../kselftest_harness.h"
14 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
15
/*
 * Hack to make assertions more readable: lets call sites name the test op
 * they are issuing even though all ops share the one IOMMU_TEST_CMD ioctl nr.
 */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Shared test state; assigned by the tests that include this header */
static void *buffer;
static unsigned long BUFFER_SIZE;

/* Runtime page size; assigned by the tests that include this header */
static unsigned long PAGE_SIZE;

/* Local copies of the kernel's struct-member size/extent helpers */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
27
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 *
 * Expands to an ASSERT inside the calling test; relies on the fixture's
 * self->fd being an open iommufd.
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
46
/*
 * Attach a mock device to @ioas_id, creating a mock domain for it.
 * Each of the three out-parameters may be NULL when the caller does not
 * need that ID.  Returns 0 on success, the ioctl() result otherwise.
 */
static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd mock_cmd = {
		.size = sizeof(mock_cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &mock_cmd);

	if (rc)
		return rc;
	if (stdev_id)
		*stdev_id = mock_cmd.mock_domain.out_stdev_id;
	/* Sanity: the command's id must be non-zero after a successful call */
	assert(mock_cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = mock_cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = mock_cmd.mock_domain.out_idev_id;
	return 0;
}
/* ASSERT wrapper: create a mock domain on the fixture's iommufd. */
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
/* Expect mock-domain creation to fail with @_errno. */
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
76
/*
 * Replace the domain of mock device @stdev_id with @pt_id.  The kernel may
 * update mock_domain_replace.pt_id; the resulting value is stored in
 * *hwpt_id when that pointer is non-NULL.
 */
static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd replace_cmd = {
		.size = sizeof(replace_cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &replace_cmd);

	if (rc)
		return rc;
	if (hwpt_id)
		*hwpt_id = replace_cmd.mock_domain_replace.pt_id;
	return 0;
}
97
/* ASSERT wrapper: replace the domain of mock device @stdev_id with @pt_id. */
#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
/* Expect the domain replace to fail with @_errno. */
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
104
/*
 * Allocate a hw pagetable for @device_id on top of @pt_id.  The new HWPT
 * ID is stored in *hwpt_id when that pointer is non-NULL.
 */
static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
				__u32 *hwpt_id)
{
	struct iommu_hwpt_alloc alloc_cmd = {
		.size = sizeof(alloc_cmd),
		.dev_id = device_id,
		.pt_id = pt_id,
	};
	int rc = ioctl(fd, IOMMU_HWPT_ALLOC, &alloc_cmd);

	if (rc)
		return rc;
	if (hwpt_id)
		*hwpt_id = alloc_cmd.out_hwpt_id;
	return 0;
}
122
/* ASSERT wrapper: allocate a HWPT for @device_id on top of @pt_id. */
#define test_cmd_hwpt_alloc(device_id, pt_id, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, hwpt_id))
125
/*
 * Point the access object @access_id at a different IOAS.
 * Returns the raw ioctl() result: 0 on success, -1 with errno set otherwise.
 */
static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};

	/*
	 * No output fields to copy back, so return the ioctl() result
	 * directly (matches _test_cmd_destroy_access_pages() et al) instead
	 * of the redundant "if (ret) return ret; return 0;" dance.
	 */
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
/* ASSERT wrapper: point access @access_id at @ioas_id. */
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
144
/*
 * Create an access object against @ioas_id with @flags.  On success the new
 * access file descriptor is stored in *access_id; @access_id must be
 * non-NULL because the store is unconditional.
 */
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd create_cmd = {
		.size = sizeof(create_cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &create_cmd);

	if (rc)
		return rc;
	*access_id = create_cmd.create_access.out_access_fd;
	return 0;
}
/* ASSERT wrapper: create an access against @ioas_id; fd stored in *access_id. */
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))
165
/* An access object is torn down simply by closing its file descriptor. */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	int rc = close(access_id);

	return rc;
}
/* ASSERT wrapper: close the access file descriptor. */
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
172
_test_cmd_destroy_access_pages(int fd,unsigned int access_id,unsigned int access_pages_id)173 static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
174 unsigned int access_pages_id)
175 {
176 struct iommu_test_cmd cmd = {
177 .size = sizeof(cmd),
178 .op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
179 .id = access_id,
180 .destroy_access_pages = { .access_pages_id = access_pages_id },
181 };
182 return ioctl(fd, IOMMU_TEST_CMD, &cmd);
183 }
/* ASSERT wrapper: destroy access-pages @access_pages_id under @access_id. */
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
/* Expect the access-pages destroy to fail with @_errno. */
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
190
_test_ioctl_destroy(int fd,unsigned int id)191 static int _test_ioctl_destroy(int fd, unsigned int id)
192 {
193 struct iommu_destroy cmd = {
194 .size = sizeof(cmd),
195 .id = id,
196 };
197 return ioctl(fd, IOMMU_DESTROY, &cmd);
198 }
199 #define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
200
/*
 * Allocate a fresh IOAS.  @id must be non-NULL; the new IOAS ID is always
 * stored there on success.
 */
static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc alloc_cmd = {
		.size = sizeof(alloc_cmd),
	};
	int rc = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_cmd);

	if (rc)
		return rc;
	*id = alloc_cmd.out_ioas_id;
	return 0;
}
/* Allocate an IOAS and additionally assert the returned ID is non-zero. */
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})
219
/*
 * Map @length bytes at @buffer into @ioas_id.  With IOMMU_IOAS_MAP_FIXED_IOVA
 * the mapping is placed at *iova; otherwise the kernel picks the IOVA.
 * @iova must be non-NULL: it is written back from the command on every path,
 * including failure.
 */
static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	int rc;
	struct iommu_ioas_map map_cmd = {
		.size = sizeof(map_cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		map_cmd.iova = *iova;

	rc = ioctl(fd, IOMMU_IOAS_MAP, &map_cmd);
	*iova = map_cmd.iova;
	return rc;
}
/* Map @buffer RW into the fixture's default IOAS; kernel chooses the IOVA. */
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
					  IOMMU_IOAS_MAP_READABLE))

/* Expect the RW map into the default IOAS to fail with @_errno. */
#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
					  IOMMU_IOAS_MAP_READABLE))

/* Map @buffer RW into an explicit @ioas_id. */
#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
					  IOMMU_IOAS_MAP_READABLE))

/* Map @buffer RW at the fixed address @iova in the default IOAS. */
#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

/* Expect the fixed-IOVA map to fail with @_errno. */
#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})
280
_test_ioctl_ioas_unmap(int fd,unsigned int ioas_id,uint64_t iova,size_t length,uint64_t * out_len)281 static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
282 size_t length, uint64_t *out_len)
283 {
284 struct iommu_ioas_unmap cmd = {
285 .size = sizeof(cmd),
286 .ioas_id = ioas_id,
287 .iova = iova,
288 .length = length,
289 };
290 int ret;
291
292 ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
293 if (out_len)
294 *out_len = cmd.length;
295 return ret;
296 }
/* Unmap [iova, iova + length) from the fixture's default IOAS. */
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

/* Unmap from an explicit @ioas_id. */
#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

/* Expect the unmap from the default IOAS to fail with @_errno. */
#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
308
_test_ioctl_set_temp_memory_limit(int fd,unsigned int limit)309 static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
310 {
311 struct iommu_test_cmd memlimit_cmd = {
312 .size = sizeof(memlimit_cmd),
313 .op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
314 .memory_limit = { .limit = limit },
315 };
316
317 return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
318 &memlimit_cmd);
319 }
320
/* ASSERT wrapper: set the temporary memory limit to @limit bytes. */
#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

/* Restore the default 64KiB temporary memory limit. */
#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
326
teardown_iommufd(int fd,struct __test_metadata * _metadata)327 static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
328 {
329 struct iommu_test_cmd test_cmd = {
330 .size = sizeof(test_cmd),
331 .op = IOMMU_TEST_OP_MD_CHECK_REFS,
332 .check_refs = { .length = BUFFER_SIZE,
333 .uptr = (uintptr_t)buffer },
334 };
335
336 if (fd == -1)
337 return;
338
339 EXPECT_EQ(0, close(fd));
340
341 fd = open("/dev/iommu", O_RDWR);
342 EXPECT_NE(-1, fd);
343 EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
344 &test_cmd));
345 EXPECT_EQ(0, close(fd));
346 }
347
/*
 * Assert @cmd fails (returns -1) and that errno matches @expected_errno.
 * @cmd is evaluated exactly once.
 */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
353
354 #endif
355
356 /* @data can be NULL */
_test_cmd_get_hw_info(int fd,__u32 device_id,void * data,size_t data_len)357 static int _test_cmd_get_hw_info(int fd, __u32 device_id,
358 void *data, size_t data_len)
359 {
360 struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
361 struct iommu_hw_info cmd = {
362 .size = sizeof(cmd),
363 .dev_id = device_id,
364 .data_len = data_len,
365 .data_uptr = (uint64_t)data,
366 };
367 int ret;
368
369 ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
370 if (ret)
371 return ret;
372
373 assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);
374
375 /*
376 * The struct iommu_test_hw_info should be the one defined
377 * by the current kernel.
378 */
379 assert(cmd.data_len == sizeof(struct iommu_test_hw_info));
380
381 /*
382 * Trailing bytes should be 0 if user buffer is larger than
383 * the data that kernel reports.
384 */
385 if (data_len > cmd.data_len) {
386 char *ptr = (char *)(data + cmd.data_len);
387 int idx = 0;
388
389 while (idx < data_len - cmd.data_len) {
390 assert(!*(ptr + idx));
391 idx++;
392 }
393 }
394
395 if (info) {
396 if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
397 assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
398 if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
399 assert(!info->flags);
400 }
401
402 return 0;
403 }
404
405 #define test_cmd_get_hw_info(device_id, data, data_len) \
406 ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \
407 data, data_len))
408
409 #define test_err_get_hw_info(_errno, device_id, data, data_len) \
410 EXPECT_ERRNO(_errno, \
411 _test_cmd_get_hw_info(self->fd, device_id, \
412 data, data_len))
413