1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <asm/unistd.h>
4 #include <stdlib.h>
5 #include <sys/capability.h>
6 #include <sys/mman.h>
7 #include <sys/eventfd.h>
8
9 #define __EXPORTED_HEADERS__
10 #include <linux/vfio.h>
11
12 #include "iommufd_utils.h"
13
14 static unsigned long HUGEPAGE_SIZE;
15
16 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
17 #define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
18
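/* Read the THP PMD size from sysfs, falling back to 2MiB if it is unavailable */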
19 static unsigned long get_huge_page_size(void)
20 {
21 char buf[80];
22 int ret;
23 int fd;
24
25 fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
26 O_RDONLY);
27 if (fd < 0)
28 return 2 * 1024 * 1024;
29
30 ret = read(fd, buf, sizeof(buf));
31 close(fd);
32 if (ret <= 0 || ret == sizeof(buf))
33 return 2 * 1024 * 1024;
34 buf[ret] = 0;
35 return strtoul(buf, NULL, 10);
36 }
37
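/*
 * Constructor: establish PAGE_SIZE/HUGEPAGE_SIZE and set up the shared test
 * buffers: a hugepage-aligned anonymous mapping in 'buffer' and a memfd
 * backed mapping in 'mfd_buffer'.
 */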
38 static __attribute__((constructor)) void setup_sizes(void)
39 {
40 void *vrc;
41 int rc;
42
43 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
44 HUGEPAGE_SIZE = get_huge_page_size();
45
46 BUFFER_SIZE = PAGE_SIZE * 16;
47 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
48 assert(!rc);
49 assert(buffer);
50 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
51 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
52 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
53 assert(vrc == buffer);
54
55 mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
56 &mfd);
57 }
58
59 FIXTURE(iommufd)
60 {
61 int fd;
62 };
63
64 FIXTURE_SETUP(iommufd)
65 {
66 self->fd = open("/dev/iommu", O_RDWR);
67 ASSERT_NE(-1, self->fd);
68 }
69
70 FIXTURE_TEARDOWN(iommufd)
71 {
72 teardown_iommufd(self->fd, _metadata);
73 }
74
75 TEST_F(iommufd, simple_close)
76 {
77 }
78
79 TEST_F(iommufd, cmd_fail)
80 {
81 struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
82
83 /* object id is invalid */
84 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
85 /* Bad pointer */
86 EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
87 /* Unknown ioctl */
88 EXPECT_ERRNO(ENOTTY,
89 ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
90 &cmd));
91 }
92
93 TEST_F(iommufd, cmd_length)
94 {
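	/*
	 * For each ioctl: a size below the last mandatory field must fail with
	 * EINVAL, a longer size with non-zero trailing bytes must fail with
	 * E2BIG, and a longer size with zeroed trailing bytes must behave the
	 * same as the exact-size call.
	 */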
95 #define TEST_LENGTH(_struct, _ioctl, _last) \
96 { \
97 size_t min_size = offsetofend(struct _struct, _last); \
98 struct { \
99 struct _struct cmd; \
100 uint8_t extra; \
101 } cmd = { .cmd = { .size = min_size - 1 }, \
102 .extra = UINT8_MAX }; \
103 int old_errno; \
104 int rc; \
105 \
106 EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
107 cmd.cmd.size = sizeof(struct _struct) + 1; \
108 EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
109 cmd.cmd.size = sizeof(struct _struct); \
110 rc = ioctl(self->fd, _ioctl, &cmd); \
111 old_errno = errno; \
112 cmd.cmd.size = sizeof(struct _struct) + 1; \
113 cmd.extra = 0; \
114 if (rc) { \
115 EXPECT_ERRNO(old_errno, \
116 ioctl(self->fd, _ioctl, &cmd)); \
117 } else { \
118 ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
119 } \
120 }
121
122 TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
123 TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
124 TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
125 TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
126 TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
127 TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
128 out_iova_alignment);
129 TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
130 allowed_iovas);
131 TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
132 TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
133 TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
134 TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
135 TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
136 TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
137 TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
138 TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
139 TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
140 __reserved);
141 #undef TEST_LENGTH
142 }
143
144 TEST_F(iommufd, cmd_ex_fail)
145 {
146 struct {
147 struct iommu_destroy cmd;
148 __u64 future;
149 } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
150
151 /* object id is invalid and command is longer */
152 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
153 /* future area is non-zero */
154 cmd.future = 1;
155 EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
156 /* Original command "works" */
157 cmd.cmd.size = sizeof(cmd.cmd);
158 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
159 /* Short command fails */
160 cmd.cmd.size = sizeof(cmd.cmd) - 1;
161 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
162 }
163
164 TEST_F(iommufd, global_options)
165 {
166 struct iommu_option cmd = {
167 .size = sizeof(cmd),
168 .option_id = IOMMU_OPTION_RLIMIT_MODE,
169 .op = IOMMU_OPTION_OP_GET,
170 .val64 = 1,
171 };
172
173 cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
174 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
175 ASSERT_EQ(0, cmd.val64);
176
177 /* This requires root */
178 cmd.op = IOMMU_OPTION_OP_SET;
179 cmd.val64 = 1;
180 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
181 cmd.val64 = 2;
182 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
183
184 cmd.op = IOMMU_OPTION_OP_GET;
185 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
186 ASSERT_EQ(1, cmd.val64);
187
188 cmd.op = IOMMU_OPTION_OP_SET;
189 cmd.val64 = 0;
190 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
191
192 cmd.op = IOMMU_OPTION_OP_GET;
193 cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
194 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
195 cmd.op = IOMMU_OPTION_OP_SET;
196 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
197 }
198
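/* Drop CAP_IPC_LOCK so the pinned/locked accounting under test applies to us */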
199 static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
200 {
201 cap_t caps;
202 cap_value_t cap_list[1] = { CAP_IPC_LOCK };
203
204 caps = cap_get_proc();
205 ASSERT_NE(caps, NULL);
206 ASSERT_NE(-1,
207 cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
208 ASSERT_NE(-1, cap_set_proc(caps));
209 cap_free(caps);
210 }
211
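/*
 * Scan /proc/<pid>/status for the line tagged @var (e.g. "VmPin:") and return
 * its value, or -1 on failure.
 */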
212 static long get_proc_status_value(pid_t pid, const char *var)
213 {
214 FILE *fp;
215 char buf[80], tag[80];
216 long val = -1;
217
218 snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
219 fp = fopen(buf, "r");
220 if (!fp)
221 return val;
222
223 while (fgets(buf, sizeof(buf), fp))
224 if (fscanf(fp, "%s %ld\n", tag, &val) == 2 && !strcmp(tag, var))
225 break;
226
227 fclose(fp);
228 return val;
229 }
230
231 static long get_vm_pinned(pid_t pid)
232 {
233 return get_proc_status_value(pid, "VmPin:");
234 }
235
236 static long get_vm_locked(pid_t pid)
237 {
238 return get_proc_status_value(pid, "VmLck:");
239 }
240
241 FIXTURE(change_process)
242 {
243 int fd;
244 uint32_t ioas_id;
245 };
246
247 FIXTURE_VARIANT(change_process)
248 {
249 int accounting;
250 };
251
252 FIXTURE_SETUP(change_process)
253 {
254 self->fd = open("/dev/iommu", O_RDWR);
255 ASSERT_NE(-1, self->fd);
256
257 drop_cap_ipc_lock(_metadata);
258 if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
259 struct iommu_option set_limit_cmd = {
260 .size = sizeof(set_limit_cmd),
261 .option_id = IOMMU_OPTION_RLIMIT_MODE,
262 .op = IOMMU_OPTION_OP_SET,
263 .val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
264 };
265 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
266 }
267
268 test_ioctl_ioas_alloc(&self->ioas_id);
269 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
270 }
271
272 FIXTURE_TEARDOWN(change_process)
273 {
274 teardown_iommufd(self->fd, _metadata);
275 }
276
277 FIXTURE_VARIANT_ADD(change_process, account_none)
278 {
279 .accounting = IOPT_PAGES_ACCOUNT_NONE,
280 };
281
282 FIXTURE_VARIANT_ADD(change_process, account_user)
283 {
284 .accounting = IOPT_PAGES_ACCOUNT_USER,
285 };
286
287 FIXTURE_VARIANT_ADD(change_process, account_mm)
288 {
289 .accounting = IOPT_PAGES_ACCOUNT_MM,
290 };
291
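/*
 * Map file-backed pages, then hand the pinned-page accounting to another
 * process with IOMMU_IOAS_CHANGE_PROCESS and check that VmPin/VmLck move
 * from the parent to the child.
 */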
292 TEST_F(change_process, basic)
293 {
294 pid_t parent = getpid();
295 pid_t child;
296 __u64 iova;
297 struct iommu_ioas_change_process cmd = {
298 .size = sizeof(cmd),
299 };
300
301 /* Expect failure if non-file maps exist */
302 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
303 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
304 test_ioctl_ioas_unmap(iova, PAGE_SIZE);
305
306 /* Change process works in current process. */
307 test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
308 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
309
310 /* Change process works in another process */
311 child = fork();
312 if (!child) {
313 int nlock = PAGE_SIZE / 1024;
314
315 /* Parent accounts for locked memory before */
316 ASSERT_EQ(nlock, get_vm_pinned(parent));
317 if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
318 ASSERT_EQ(nlock, get_vm_locked(parent));
319 ASSERT_EQ(0, get_vm_pinned(getpid()));
320 ASSERT_EQ(0, get_vm_locked(getpid()));
321
322 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
323
324 /* Child accounts for locked memory after */
325 ASSERT_EQ(0, get_vm_pinned(parent));
326 ASSERT_EQ(0, get_vm_locked(parent));
327 ASSERT_EQ(nlock, get_vm_pinned(getpid()));
328 if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
329 ASSERT_EQ(nlock, get_vm_locked(getpid()));
330
331 exit(0);
332 }
333 ASSERT_NE(-1, child);
334 ASSERT_EQ(child, waitpid(child, NULL, 0));
335 }
336
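/*
 * IOAS fixture: variants control how many mock domains are attached, an
 * optional temporary memory limit, and whether a PASID-capable mock device
 * is added.
 */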
337 FIXTURE(iommufd_ioas)
338 {
339 int fd;
340 uint32_t ioas_id;
341 uint32_t stdev_id;
342 uint32_t hwpt_id;
343 uint32_t device_id;
344 uint64_t base_iova;
345 uint32_t device_pasid_id;
346 };
347
348 FIXTURE_VARIANT(iommufd_ioas)
349 {
350 unsigned int mock_domains;
351 unsigned int memory_limit;
352 bool pasid_capable;
353 };
354
355 FIXTURE_SETUP(iommufd_ioas)
356 {
357 unsigned int i;
358
359
360 self->fd = open("/dev/iommu", O_RDWR);
361 ASSERT_NE(-1, self->fd);
362 test_ioctl_ioas_alloc(&self->ioas_id);
363
364 if (!variant->memory_limit) {
365 test_ioctl_set_default_memory_limit();
366 } else {
367 test_ioctl_set_temp_memory_limit(variant->memory_limit);
368 }
369
370 for (i = 0; i != variant->mock_domains; i++) {
371 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
372 &self->hwpt_id, &self->device_id);
373 test_cmd_dev_check_cache_all(self->device_id,
374 IOMMU_TEST_DEV_CACHE_DEFAULT);
375 self->base_iova = MOCK_APERTURE_START;
376 }
377
378 if (variant->pasid_capable)
379 test_cmd_mock_domain_flags(self->ioas_id,
380 MOCK_FLAGS_DEVICE_PASID,
381 NULL, NULL,
382 &self->device_pasid_id);
383 }
384
385 FIXTURE_TEARDOWN(iommufd_ioas)
386 {
387 test_ioctl_set_default_memory_limit();
388 teardown_iommufd(self->fd, _metadata);
389 }
390
391 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
392 {
393 };
394
395 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
396 {
397 .mock_domains = 1,
398 .pasid_capable = true,
399 };
400
401 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
402 {
403 .mock_domains = 2,
404 };
405
406 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
407 {
408 .mock_domains = 1,
409 .memory_limit = 16,
410 };
411
412 TEST_F(iommufd_ioas, ioas_auto_destroy)
413 {
414 }
415
416 TEST_F(iommufd_ioas, ioas_destroy)
417 {
418 if (self->stdev_id) {
419 /* IOAS cannot be freed while a device has a HWPT using it */
420 EXPECT_ERRNO(EBUSY,
421 _test_ioctl_destroy(self->fd, self->ioas_id));
422 } else {
423 /* Can allocate and manually free an IOAS table */
424 test_ioctl_destroy(self->ioas_id);
425 }
426 }
427
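/*
 * Exercise nested HWPT allocation on top of a NEST_PARENT HWPT, the IOPF
 * path, and the error/success cases of IOMMU_HWPT_INVALIDATE against the
 * mock driver's IOTLB.
 */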
428 TEST_F(iommufd_ioas, alloc_hwpt_nested)
429 {
430 const uint32_t min_data_len =
431 offsetofend(struct iommu_hwpt_selftest, iotlb);
432 struct iommu_hwpt_selftest data = {
433 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
434 };
435 struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
436 uint32_t nested_hwpt_id[2] = {};
437 uint32_t num_inv;
438 uint32_t parent_hwpt_id = 0;
439 uint32_t parent_hwpt_id_not_work = 0;
440 uint32_t test_hwpt_id = 0;
441 uint32_t iopf_hwpt_id;
442 uint32_t fault_id;
443 uint32_t fault_fd;
444
445 if (self->device_id) {
446 /* Negative tests */
447 test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
448 &test_hwpt_id);
449 test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
450 &test_hwpt_id);
451 test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
452 IOMMU_HWPT_ALLOC_NEST_PARENT |
453 IOMMU_HWPT_FAULT_ID_VALID,
454 &test_hwpt_id);
455
456 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
457 IOMMU_HWPT_ALLOC_NEST_PARENT,
458 &parent_hwpt_id);
459
460 test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
461 &parent_hwpt_id_not_work);
462
463 /* Negative nested tests */
464 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
465 parent_hwpt_id, 0,
466 &nested_hwpt_id[0],
467 IOMMU_HWPT_DATA_NONE, &data,
468 sizeof(data));
469 test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
470 parent_hwpt_id, 0,
471 &nested_hwpt_id[0],
472 IOMMU_HWPT_DATA_SELFTEST + 1, &data,
473 sizeof(data));
474 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
475 parent_hwpt_id, 0,
476 &nested_hwpt_id[0],
477 IOMMU_HWPT_DATA_SELFTEST, &data,
478 min_data_len - 1);
479 test_err_hwpt_alloc_nested(EFAULT, self->device_id,
480 parent_hwpt_id, 0,
481 &nested_hwpt_id[0],
482 IOMMU_HWPT_DATA_SELFTEST, NULL,
483 sizeof(data));
484 test_err_hwpt_alloc_nested(
485 EOPNOTSUPP, self->device_id, parent_hwpt_id,
486 IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
487 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
488 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
489 parent_hwpt_id_not_work, 0,
490 &nested_hwpt_id[0],
491 IOMMU_HWPT_DATA_SELFTEST, &data,
492 sizeof(data));
493
494 /* Allocate two nested hwpts sharing one common parent hwpt */
495 test_ioctl_fault_alloc(&fault_id, &fault_fd);
496 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
497 &nested_hwpt_id[0],
498 IOMMU_HWPT_DATA_SELFTEST, &data,
499 sizeof(data));
500 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
501 &nested_hwpt_id[1],
502 IOMMU_HWPT_DATA_SELFTEST, &data,
503 sizeof(data));
504 test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
505 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
506 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
507 &data, sizeof(data));
508 test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
509 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
510 IOMMU_HWPT_DATA_SELFTEST, &data,
511 sizeof(data));
512 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
513 IOMMU_TEST_IOTLB_DEFAULT);
514 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
515 IOMMU_TEST_IOTLB_DEFAULT);
516
517 /* Negative test: a nested hwpt on top of a nested hwpt */
518 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
519 nested_hwpt_id[0], 0, &test_hwpt_id,
520 IOMMU_HWPT_DATA_SELFTEST, &data,
521 sizeof(data));
522 /* Negative test: parent hwpt now cannot be freed */
523 EXPECT_ERRNO(EBUSY,
524 _test_ioctl_destroy(self->fd, parent_hwpt_id));
525
526 /* hwpt_invalidate does not support a parent hwpt */
527 num_inv = 1;
528 test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
529 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
530 sizeof(*inv_reqs), &num_inv);
531 assert(!num_inv);
532
533 /* Check data_type by passing zero-length array */
534 num_inv = 0;
535 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
536 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
537 sizeof(*inv_reqs), &num_inv);
538 assert(!num_inv);
539
540 /* Negative test: Invalid data_type */
541 num_inv = 1;
542 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
543 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
544 sizeof(*inv_reqs), &num_inv);
545 assert(!num_inv);
546
547 /* Negative test: structure size sanity */
548 num_inv = 1;
549 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
550 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
551 sizeof(*inv_reqs) + 1, &num_inv);
552 assert(!num_inv);
553
554 num_inv = 1;
555 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
556 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
557 1, &num_inv);
558 assert(!num_inv);
559
560 /* Negative test: invalid flag is passed */
561 num_inv = 1;
562 inv_reqs[0].flags = 0xffffffff;
563 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
564 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
565 sizeof(*inv_reqs), &num_inv);
566 assert(!num_inv);
567
568 /* Negative test: invalid data_uptr when array is not empty */
569 num_inv = 1;
570 inv_reqs[0].flags = 0;
571 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
572 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
573 sizeof(*inv_reqs), &num_inv);
574 assert(!num_inv);
575
576 /* Negative test: invalid entry_len when array is not empty */
577 num_inv = 1;
578 inv_reqs[0].flags = 0;
579 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
580 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
581 0, &num_inv);
582 assert(!num_inv);
583
584 /* Negative test: invalid iotlb_id */
585 num_inv = 1;
586 inv_reqs[0].flags = 0;
587 inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
588 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
589 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
590 sizeof(*inv_reqs), &num_inv);
591 assert(!num_inv);
592
593 /*
594 * Invalidate the 1st iotlb entry but fail the 2nd request
595 * due to invalid flags configuration in the 2nd request.
596 */
597 num_inv = 2;
598 inv_reqs[0].flags = 0;
599 inv_reqs[0].iotlb_id = 0;
600 inv_reqs[1].flags = 0xffffffff;
601 inv_reqs[1].iotlb_id = 1;
602 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
603 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
604 sizeof(*inv_reqs), &num_inv);
605 assert(num_inv == 1);
606 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
607 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
608 IOMMU_TEST_IOTLB_DEFAULT);
609 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
610 IOMMU_TEST_IOTLB_DEFAULT);
611 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
612 IOMMU_TEST_IOTLB_DEFAULT);
613
614 /*
615 * Invalidate the 1st iotlb entry but fail the 2nd request
616 * due to invalid iotlb_id configuration in the 2nd request.
617 */
618 num_inv = 2;
619 inv_reqs[0].flags = 0;
620 inv_reqs[0].iotlb_id = 0;
621 inv_reqs[1].flags = 0;
622 inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
623 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
624 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
625 sizeof(*inv_reqs), &num_inv);
626 assert(num_inv == 1);
627 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
628 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
629 IOMMU_TEST_IOTLB_DEFAULT);
630 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
631 IOMMU_TEST_IOTLB_DEFAULT);
632 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
633 IOMMU_TEST_IOTLB_DEFAULT);
634
635 /* Invalidate the 2nd iotlb entry and verify */
636 num_inv = 1;
637 inv_reqs[0].flags = 0;
638 inv_reqs[0].iotlb_id = 1;
639 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
640 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
641 sizeof(*inv_reqs), &num_inv);
642 assert(num_inv == 1);
643 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
644 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
645 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
646 IOMMU_TEST_IOTLB_DEFAULT);
647 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
648 IOMMU_TEST_IOTLB_DEFAULT);
649
650 /* Invalidate the 3rd and 4th iotlb entries and verify */
651 num_inv = 2;
652 inv_reqs[0].flags = 0;
653 inv_reqs[0].iotlb_id = 2;
654 inv_reqs[1].flags = 0;
655 inv_reqs[1].iotlb_id = 3;
656 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
657 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
658 sizeof(*inv_reqs), &num_inv);
659 assert(num_inv == 2);
660 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
661
662 /* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
663 num_inv = 1;
664 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
665 test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
666 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
667 sizeof(*inv_reqs), &num_inv);
668 assert(num_inv == 1);
669 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
670
671 /* Attach device to nested_hwpt_id[0] that then will be busy */
672 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
673 EXPECT_ERRNO(EBUSY,
674 _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
675
676 /* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
677 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
678 EXPECT_ERRNO(EBUSY,
679 _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
680 test_ioctl_destroy(nested_hwpt_id[0]);
681
682 /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
683 test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
684 EXPECT_ERRNO(EBUSY,
685 _test_ioctl_destroy(self->fd, iopf_hwpt_id));
686 /* Trigger an IOPF on the device */
687 test_cmd_trigger_iopf(self->device_id, fault_fd);
688
689 /* Detach from nested_hwpt_id[1] and destroy it */
690 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
691 test_ioctl_destroy(nested_hwpt_id[1]);
692 test_ioctl_destroy(iopf_hwpt_id);
693
694 /* Detach from the parent hw_pagetable and destroy it */
695 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
696 test_ioctl_destroy(parent_hwpt_id);
697 test_ioctl_destroy(parent_hwpt_id_not_work);
698 close(fault_fd);
699 test_ioctl_destroy(fault_id);
700 } else {
701 test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
702 &parent_hwpt_id);
703 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
704 parent_hwpt_id, 0,
705 &nested_hwpt_id[0],
706 IOMMU_HWPT_DATA_SELFTEST, &data,
707 sizeof(data));
708 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
709 parent_hwpt_id, 0,
710 &nested_hwpt_id[1],
711 IOMMU_HWPT_DATA_SELFTEST, &data,
712 sizeof(data));
713 test_err_mock_domain_replace(ENOENT, self->stdev_id,
714 nested_hwpt_id[0]);
715 test_err_mock_domain_replace(ENOENT, self->stdev_id,
716 nested_hwpt_id[1]);
717 }
718 }
719
720 TEST_F(iommufd_ioas, hwpt_attach)
721 {
722 /* Create a device attached directly to a hwpt */
723 if (self->stdev_id) {
724 test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
725 } else {
726 test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
727 }
728 }
729
730 TEST_F(iommufd_ioas, ioas_area_destroy)
731 {
732 /* Adding an area does not change ability to destroy */
733 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
734 if (self->stdev_id)
735 EXPECT_ERRNO(EBUSY,
736 _test_ioctl_destroy(self->fd, self->ioas_id));
737 else
738 test_ioctl_destroy(self->ioas_id);
739 }
740
741 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
742 {
743 int i;
744
745 /* Can allocate and automatically free an IOAS table with many areas */
746 for (i = 0; i != 10; i++) {
747 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
748 self->base_iova + i * PAGE_SIZE);
749 }
750 }
751
752 TEST_F(iommufd_ioas, get_hw_info)
753 {
754 struct iommu_test_hw_info buffer_exact;
755 struct iommu_test_hw_info_buffer_larger {
756 struct iommu_test_hw_info info;
757 uint64_t trailing_bytes;
758 } buffer_larger;
759 struct iommu_test_hw_info_buffer_smaller {
760 __u32 flags;
761 } buffer_smaller;
762
763 if (self->device_id) {
764 uint8_t max_pasid = 0;
765
766 /* Provide a zero-size user_buffer */
767 test_cmd_get_hw_info(self->device_id, NULL, 0);
768 /* Provide a user_buffer with exact size */
769 test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
770 /*
771 * Provide a user_buffer with size larger than the exact size to check if
772 * kernel zero the trailing bytes.
773 */
774 test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
775 /*
776 * Provide a user_buffer with size smaller than the exact size to check if
777 * the fields within the size range still gets updated.
778 */
779 test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
780 test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
781 ASSERT_EQ(0, max_pasid);
782 if (variant->pasid_capable) {
783 test_cmd_get_hw_info_pasid(self->device_pasid_id,
784 &max_pasid);
785 ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid);
786 }
787 } else {
788 test_err_get_hw_info(ENOENT, self->device_id,
789 &buffer_exact, sizeof(buffer_exact));
790 test_err_get_hw_info(ENOENT, self->device_id,
791 &buffer_larger, sizeof(buffer_larger));
792 }
793 }
794
795 TEST_F(iommufd_ioas, area)
796 {
797 int i;
798
799 /* Unmap fails if nothing is mapped */
800 for (i = 0; i != 10; i++)
801 test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
802
803 /* Unmap works */
804 for (i = 0; i != 10; i++)
805 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
806 self->base_iova + i * PAGE_SIZE);
807 for (i = 0; i != 10; i++)
808 test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
809 PAGE_SIZE);
810
811 /* Split fails */
812 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
813 self->base_iova + 16 * PAGE_SIZE);
814 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
815 PAGE_SIZE);
816 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
817 PAGE_SIZE);
818
819 /* Over map fails */
820 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
821 self->base_iova + 16 * PAGE_SIZE);
822 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
823 self->base_iova + 16 * PAGE_SIZE);
824 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
825 self->base_iova + 17 * PAGE_SIZE);
826 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
827 self->base_iova + 15 * PAGE_SIZE);
828 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
829 self->base_iova + 15 * PAGE_SIZE);
830
831 /* unmap all works */
832 test_ioctl_ioas_unmap(0, UINT64_MAX);
833
834 /* Unmap all succeeds on an empty IOAS */
835 test_ioctl_ioas_unmap(0, UINT64_MAX);
836 }
837
838 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
839 {
840 uint64_t unmap_len;
841 int i;
842
843 /* Give no_domain some space to rewind base_iova */
844 self->base_iova += 4 * PAGE_SIZE;
845
846 for (i = 0; i != 4; i++)
847 test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
848 self->base_iova + i * 16 * PAGE_SIZE);
849
850 /* Unmap not fully contained area doesn't work */
851 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
852 8 * PAGE_SIZE);
853 test_err_ioctl_ioas_unmap(ENOENT,
854 self->base_iova + 3 * 16 * PAGE_SIZE +
855 8 * PAGE_SIZE - 4 * PAGE_SIZE,
856 8 * PAGE_SIZE);
857
858 /* Unmap fully contained areas works */
859 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
860 self->base_iova - 4 * PAGE_SIZE,
861 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
862 4 * PAGE_SIZE,
863 &unmap_len));
864 ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
865 }
866
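/*
 * Automatic IOVA allocation: results must be naturally aligned to the mapping
 * size, avoid reserved ranges and stay inside any allowed ranges.
 */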
867 TEST_F(iommufd_ioas, area_auto_iova)
868 {
869 struct iommu_test_cmd test_cmd = {
870 .size = sizeof(test_cmd),
871 .op = IOMMU_TEST_OP_ADD_RESERVED,
872 .id = self->ioas_id,
873 .add_reserved = { .start = PAGE_SIZE * 4,
874 .length = PAGE_SIZE * 100 },
875 };
876 struct iommu_iova_range ranges[1] = {};
877 struct iommu_ioas_allow_iovas allow_cmd = {
878 .size = sizeof(allow_cmd),
879 .ioas_id = self->ioas_id,
880 .num_iovas = 1,
881 .allowed_iovas = (uintptr_t)ranges,
882 };
883 __u64 iovas[10];
884 int i;
885
886 /* Simple 4k pages */
887 for (i = 0; i != 10; i++)
888 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
889 for (i = 0; i != 10; i++)
890 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
891
892 /* Kernel automatically aligns IOVAs properly */
893 for (i = 0; i != 10; i++) {
894 size_t length = PAGE_SIZE * (i + 1);
895
896 if (self->stdev_id) {
897 test_ioctl_ioas_map(buffer, length, &iovas[i]);
898 } else {
899 test_ioctl_ioas_map((void *)(1UL << 31), length,
900 &iovas[i]);
901 }
902 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
903 }
904 for (i = 0; i != 10; i++)
905 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
906
907 /* Avoids a reserved region */
908 ASSERT_EQ(0,
909 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
910 &test_cmd));
911 for (i = 0; i != 10; i++) {
912 size_t length = PAGE_SIZE * (i + 1);
913
914 test_ioctl_ioas_map(buffer, length, &iovas[i]);
915 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
916 EXPECT_EQ(false,
917 iovas[i] > test_cmd.add_reserved.start &&
918 iovas[i] <
919 test_cmd.add_reserved.start +
920 test_cmd.add_reserved.length);
921 }
922 for (i = 0; i != 10; i++)
923 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
924
925 /* Allowed region intersects with a reserved region */
926 ranges[0].start = PAGE_SIZE;
927 ranges[0].last = PAGE_SIZE * 600;
928 EXPECT_ERRNO(EADDRINUSE,
929 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
930
931 /* Allocate from an allowed region */
932 if (self->stdev_id) {
933 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
934 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
935 } else {
936 ranges[0].start = PAGE_SIZE * 200;
937 ranges[0].last = PAGE_SIZE * 600 - 1;
938 }
939 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
940 for (i = 0; i != 10; i++) {
941 size_t length = PAGE_SIZE * (i + 1);
942
943 test_ioctl_ioas_map(buffer, length, &iovas[i]);
944 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
945 EXPECT_EQ(true, iovas[i] >= ranges[0].start);
946 EXPECT_EQ(true, iovas[i] <= ranges[0].last);
947 EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
948 EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
949 }
950 for (i = 0; i != 10; i++)
951 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
952 }
953
954 TEST_F(iommufd_ioas, area_allowed)
955 {
956 struct iommu_test_cmd test_cmd = {
957 .size = sizeof(test_cmd),
958 .op = IOMMU_TEST_OP_ADD_RESERVED,
959 .id = self->ioas_id,
960 .add_reserved = { .start = PAGE_SIZE * 4,
961 .length = PAGE_SIZE * 100 },
962 };
963 struct iommu_iova_range ranges[1] = {};
964 struct iommu_ioas_allow_iovas allow_cmd = {
965 .size = sizeof(allow_cmd),
966 .ioas_id = self->ioas_id,
967 .num_iovas = 1,
968 .allowed_iovas = (uintptr_t)ranges,
969 };
970
971 /* Reserved intersects an allowed */
972 allow_cmd.num_iovas = 1;
973 ranges[0].start = self->base_iova;
974 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
975 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
976 test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
977 test_cmd.add_reserved.length = PAGE_SIZE;
978 EXPECT_ERRNO(EADDRINUSE,
979 ioctl(self->fd,
980 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
981 &test_cmd));
982 allow_cmd.num_iovas = 0;
983 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
984
985 /* Allowed intersects a reserved */
986 ASSERT_EQ(0,
987 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
988 &test_cmd));
989 allow_cmd.num_iovas = 1;
990 ranges[0].start = self->base_iova;
991 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
992 EXPECT_ERRNO(EADDRINUSE,
993 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
994 }
995
996 TEST_F(iommufd_ioas, copy_area)
997 {
998 struct iommu_ioas_copy copy_cmd = {
999 .size = sizeof(copy_cmd),
1000 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1001 .dst_ioas_id = self->ioas_id,
1002 .src_ioas_id = self->ioas_id,
1003 .length = PAGE_SIZE,
1004 };
1005
1006 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
1007
1008 /* Copy inside a single IOAS */
1009 copy_cmd.src_iova = self->base_iova;
1010 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
1011 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1012
1013 /* Copy between IOAS's */
1014 copy_cmd.src_iova = self->base_iova;
1015 copy_cmd.dst_iova = 0;
1016 test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
1017 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1018 }
1019
1020 TEST_F(iommufd_ioas, iova_ranges)
1021 {
1022 struct iommu_test_cmd test_cmd = {
1023 .size = sizeof(test_cmd),
1024 .op = IOMMU_TEST_OP_ADD_RESERVED,
1025 .id = self->ioas_id,
1026 .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
1027 };
1028 struct iommu_iova_range *ranges = buffer;
1029 struct iommu_ioas_iova_ranges ranges_cmd = {
1030 .size = sizeof(ranges_cmd),
1031 .ioas_id = self->ioas_id,
1032 .num_iovas = BUFFER_SIZE / sizeof(*ranges),
1033 .allowed_iovas = (uintptr_t)ranges,
1034 };
1035
1036 /* Range can be read */
1037 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1038 EXPECT_EQ(1, ranges_cmd.num_iovas);
1039 if (!self->stdev_id) {
1040 EXPECT_EQ(0, ranges[0].start);
1041 EXPECT_EQ(SIZE_MAX, ranges[0].last);
1042 EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
1043 } else {
1044 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1045 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1046 EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
1047 }
1048
1049 /* Buffer too small */
1050 memset(ranges, 0, BUFFER_SIZE);
1051 ranges_cmd.num_iovas = 0;
1052 EXPECT_ERRNO(EMSGSIZE,
1053 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1054 EXPECT_EQ(1, ranges_cmd.num_iovas);
1055 EXPECT_EQ(0, ranges[0].start);
1056 EXPECT_EQ(0, ranges[0].last);
1057
1058 /* 2 ranges */
1059 ASSERT_EQ(0,
1060 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1061 &test_cmd));
1062 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
1063 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1064 if (!self->stdev_id) {
1065 EXPECT_EQ(2, ranges_cmd.num_iovas);
1066 EXPECT_EQ(0, ranges[0].start);
1067 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1068 EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
1069 EXPECT_EQ(SIZE_MAX, ranges[1].last);
1070 } else {
1071 EXPECT_EQ(1, ranges_cmd.num_iovas);
1072 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1073 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1074 }
1075
1076 /* Buffer too small */
1077 memset(ranges, 0, BUFFER_SIZE);
1078 ranges_cmd.num_iovas = 1;
1079 if (!self->stdev_id) {
1080 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
1081 &ranges_cmd));
1082 EXPECT_EQ(2, ranges_cmd.num_iovas);
1083 EXPECT_EQ(0, ranges[0].start);
1084 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1085 } else {
1086 ASSERT_EQ(0,
1087 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1088 EXPECT_EQ(1, ranges_cmd.num_iovas);
1089 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1090 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1091 }
1092 EXPECT_EQ(0, ranges[1].start);
1093 EXPECT_EQ(0, ranges[1].last);
1094 }
1095
1096 TEST_F(iommufd_ioas, access_domain_destory)
1097 {
1098 struct iommu_test_cmd access_cmd = {
1099 .size = sizeof(access_cmd),
1100 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1101 .access_pages = { .iova = self->base_iova + PAGE_SIZE,
1102 .length = PAGE_SIZE},
1103 };
1104 size_t buf_size = 2 * HUGEPAGE_SIZE;
1105 uint8_t *buf;
1106
1107 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
1108 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
1109 0);
1110 ASSERT_NE(MAP_FAILED, buf);
1111 test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
1112
1113 test_cmd_create_access(self->ioas_id, &access_cmd.id,
1114 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1115 access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
1116 ASSERT_EQ(0,
1117 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1118 &access_cmd));
1119
1120 /* Causes a complicated unpin across a huge page boundary */
1121 if (self->stdev_id)
1122 test_ioctl_destroy(self->stdev_id);
1123
1124 test_cmd_destroy_access_pages(
1125 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1126 test_cmd_destroy_access(access_cmd.id);
1127 ASSERT_EQ(0, munmap(buf, buf_size));
1128 }
1129
1130 TEST_F(iommufd_ioas, access_pin)
1131 {
1132 struct iommu_test_cmd access_cmd = {
1133 .size = sizeof(access_cmd),
1134 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1135 .access_pages = { .iova = MOCK_APERTURE_START,
1136 .length = BUFFER_SIZE,
1137 .uptr = (uintptr_t)buffer },
1138 };
1139 struct iommu_test_cmd check_map_cmd = {
1140 .size = sizeof(check_map_cmd),
1141 .op = IOMMU_TEST_OP_MD_CHECK_MAP,
1142 .check_map = { .iova = MOCK_APERTURE_START,
1143 .length = BUFFER_SIZE,
1144 .uptr = (uintptr_t)buffer },
1145 };
1146 uint32_t access_pages_id;
1147 unsigned int npages;
1148
1149 test_cmd_create_access(self->ioas_id, &access_cmd.id,
1150 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1151
1152 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
1153 uint32_t mock_stdev_id;
1154 uint32_t mock_hwpt_id;
1155
1156 access_cmd.access_pages.length = npages * PAGE_SIZE;
1157
1158 /* Single map/unmap */
1159 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1160 MOCK_APERTURE_START);
1161 ASSERT_EQ(0, ioctl(self->fd,
1162 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1163 &access_cmd));
1164 test_cmd_destroy_access_pages(
1165 access_cmd.id,
1166 access_cmd.access_pages.out_access_pages_id);
1167
1168 /* Double user */
1169 ASSERT_EQ(0, ioctl(self->fd,
1170 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1171 &access_cmd));
1172 access_pages_id = access_cmd.access_pages.out_access_pages_id;
1173 ASSERT_EQ(0, ioctl(self->fd,
1174 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1175 &access_cmd));
1176 test_cmd_destroy_access_pages(
1177 access_cmd.id,
1178 access_cmd.access_pages.out_access_pages_id);
1179 test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
1180
1181 /* Add/remove a domain with a user */
1182 ASSERT_EQ(0, ioctl(self->fd,
1183 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1184 &access_cmd));
1185 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1186 &mock_hwpt_id, NULL);
1187 check_map_cmd.id = mock_hwpt_id;
1188 ASSERT_EQ(0, ioctl(self->fd,
1189 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
1190 &check_map_cmd));
1191
1192 test_ioctl_destroy(mock_stdev_id);
1193 test_cmd_destroy_access_pages(
1194 access_cmd.id,
1195 access_cmd.access_pages.out_access_pages_id);
1196
1197 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1198 }
1199 test_cmd_destroy_access(access_cmd.id);
1200 }
1201
1202 TEST_F(iommufd_ioas, access_pin_unmap)
1203 {
1204 struct iommu_test_cmd access_pages_cmd = {
1205 .size = sizeof(access_pages_cmd),
1206 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1207 .access_pages = { .iova = MOCK_APERTURE_START,
1208 .length = BUFFER_SIZE,
1209 .uptr = (uintptr_t)buffer },
1210 };
1211
1212 test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1213 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1214 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1215 ASSERT_EQ(0,
1216 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1217 &access_pages_cmd));
1218
1219 /* Trigger the unmap op */
1220 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1221
1222 /* kernel removed the item for us */
1223 test_err_destroy_access_pages(
1224 ENOENT, access_pages_cmd.id,
1225 access_pages_cmd.access_pages.out_access_pages_id);
1226 }
1227
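/*
 * Sweep byte-granular reads and writes through an access object across a page
 * boundary, then do one multi-page transfer, comparing everything against the
 * shared buffer.
 */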
1228 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1229 unsigned int access_id, uint64_t iova,
1230 unsigned int def_flags)
1231 {
1232 uint16_t tmp[32];
1233 struct iommu_test_cmd access_cmd = {
1234 .size = sizeof(access_cmd),
1235 .op = IOMMU_TEST_OP_ACCESS_RW,
1236 .id = access_id,
1237 .access_rw = { .uptr = (uintptr_t)tmp },
1238 };
1239 uint16_t *buffer16 = buffer;
1240 unsigned int i;
1241 void *tmp2;
1242
1243 for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1244 buffer16[i] = rand();
1245
1246 for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1247 access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1248 access_cmd.access_rw.iova++) {
1249 for (access_cmd.access_rw.length = 1;
1250 access_cmd.access_rw.length < sizeof(tmp);
1251 access_cmd.access_rw.length++) {
1252 access_cmd.access_rw.flags = def_flags;
1253 ASSERT_EQ(0, ioctl(fd,
1254 _IOMMU_TEST_CMD(
1255 IOMMU_TEST_OP_ACCESS_RW),
1256 &access_cmd));
1257 ASSERT_EQ(0,
1258 memcmp(buffer + (access_cmd.access_rw.iova -
1259 iova),
1260 tmp, access_cmd.access_rw.length));
1261
1262 for (i = 0; i != ARRAY_SIZE(tmp); i++)
1263 tmp[i] = rand();
1264 access_cmd.access_rw.flags = def_flags |
1265 MOCK_ACCESS_RW_WRITE;
1266 ASSERT_EQ(0, ioctl(fd,
1267 _IOMMU_TEST_CMD(
1268 IOMMU_TEST_OP_ACCESS_RW),
1269 &access_cmd));
1270 ASSERT_EQ(0,
1271 memcmp(buffer + (access_cmd.access_rw.iova -
1272 iova),
1273 tmp, access_cmd.access_rw.length));
1274 }
1275 }
1276
1277 /* Multi-page test */
1278 tmp2 = malloc(BUFFER_SIZE);
1279 ASSERT_NE(NULL, tmp2);
1280 access_cmd.access_rw.iova = iova;
1281 access_cmd.access_rw.length = BUFFER_SIZE;
1282 access_cmd.access_rw.flags = def_flags;
1283 access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1284 ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1285 &access_cmd));
1286 ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1287 free(tmp2);
1288 }
1289
1290 TEST_F(iommufd_ioas, access_rw)
1291 {
1292 __u32 access_id;
1293 __u64 iova;
1294
1295 test_cmd_create_access(self->ioas_id, &access_id, 0);
1296 test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1297 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1298 check_access_rw(_metadata, self->fd, access_id, iova,
1299 MOCK_ACCESS_RW_SLOW_PATH);
1300 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1301 test_cmd_destroy_access(access_id);
1302 }
1303
1304 TEST_F(iommufd_ioas, access_rw_unaligned)
1305 {
1306 __u32 access_id;
1307 __u64 iova;
1308
1309 test_cmd_create_access(self->ioas_id, &access_id, 0);
1310
1311 /* Unaligned pages */
1312 iova = self->base_iova + MOCK_PAGE_SIZE;
1313 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1314 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1315 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1316 test_cmd_destroy_access(access_id);
1317 }
1318
1319 TEST_F(iommufd_ioas, fork_gone)
1320 {
1321 __u32 access_id;
1322 pid_t child;
1323
1324 test_cmd_create_access(self->ioas_id, &access_id, 0);
1325
1326 /* Create a mapping with a different mm */
1327 child = fork();
1328 if (!child) {
1329 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1330 MOCK_APERTURE_START);
1331 exit(0);
1332 }
1333 ASSERT_NE(-1, child);
1334 ASSERT_EQ(child, waitpid(child, NULL, 0));
1335
1336 if (self->stdev_id) {
1337 /*
1338 * If a domain already existed then everything was pinned within
1339 * the fork, so this copies from one domain to another.
1340 */
1341 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1342 check_access_rw(_metadata, self->fd, access_id,
1343 MOCK_APERTURE_START, 0);
1344
1345 } else {
1346 /*
1347 * Otherwise we need to actually pin pages which can't happen
1348 * since the fork is gone.
1349 */
1350 test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1351 }
1352
1353 test_cmd_destroy_access(access_id);
1354 }
1355
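/*
 * The child maps the buffer and then blocks on a pipe read, keeping its mm
 * alive so the parent can pin and read the pages it mapped.
 */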
1356 TEST_F(iommufd_ioas, fork_present)
1357 {
1358 __u32 access_id;
1359 int pipefds[2];
1360 uint64_t tmp;
1361 pid_t child;
1362 int efd;
1363
1364 test_cmd_create_access(self->ioas_id, &access_id, 0);
1365
1366 ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1367 efd = eventfd(0, EFD_CLOEXEC);
1368 ASSERT_NE(-1, efd);
1369
1370 /* Create a mapping with a different mm */
1371 child = fork();
1372 if (!child) {
1373 __u64 iova;
1374 uint64_t one = 1;
1375
1376 close(pipefds[1]);
1377 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1378 MOCK_APERTURE_START);
1379 if (write(efd, &one, sizeof(one)) != sizeof(one))
1380 exit(100);
1381 if (read(pipefds[0], &iova, 1) != 1)
1382 exit(100);
1383 exit(0);
1384 }
1385 close(pipefds[0]);
1386 ASSERT_NE(-1, child);
1387 ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1388
1389 /* Read pages from the remote process */
1390 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1391 check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1392
1393 ASSERT_EQ(0, close(pipefds[1]));
1394 ASSERT_EQ(child, waitpid(child, NULL, 0));
1395
1396 test_cmd_destroy_access(access_id);
1397 }
1398
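/* IOMMU_OPTION_HUGE_PAGES defaults to enabled (1) and only accepts 0 or 1 */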
1399 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1400 {
1401 struct iommu_option cmd = {
1402 .size = sizeof(cmd),
1403 .option_id = IOMMU_OPTION_HUGE_PAGES,
1404 .op = IOMMU_OPTION_OP_GET,
1405 .val64 = 3,
1406 .object_id = self->ioas_id,
1407 };
1408
1409 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1410 ASSERT_EQ(1, cmd.val64);
1411
1412 cmd.op = IOMMU_OPTION_OP_SET;
1413 cmd.val64 = 0;
1414 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1415
1416 cmd.op = IOMMU_OPTION_OP_GET;
1417 cmd.val64 = 3;
1418 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1419 ASSERT_EQ(0, cmd.val64);
1420
1421 cmd.op = IOMMU_OPTION_OP_SET;
1422 cmd.val64 = 2;
1423 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1424
1425 cmd.op = IOMMU_OPTION_OP_SET;
1426 cmd.val64 = 1;
1427 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1428 }
1429
1430 TEST_F(iommufd_ioas, ioas_iova_alloc)
1431 {
1432 unsigned int length;
1433 __u64 iova;
1434
1435 for (length = 1; length != PAGE_SIZE * 2; length++) {
1436 if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1437 test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1438 } else {
1439 test_ioctl_ioas_map(buffer, length, &iova);
1440 test_ioctl_ioas_unmap(iova, length);
1441 }
1442 }
1443 }
1444
1445 TEST_F(iommufd_ioas, ioas_align_change)
1446 {
1447 struct iommu_option cmd = {
1448 .size = sizeof(cmd),
1449 .option_id = IOMMU_OPTION_HUGE_PAGES,
1450 .op = IOMMU_OPTION_OP_SET,
1451 .object_id = self->ioas_id,
1452 /* 0 means everything must be aligned to PAGE_SIZE */
1453 .val64 = 0,
1454 };
1455
1456 /*
1457 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1458 * and map are present.
1459 */
1460 if (variant->mock_domains)
1461 return;
1462
1463 /*
1464 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1465 */
1466 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1467 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1468
1469 /* Misalignment is rejected at map time */
1470 test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1471 PAGE_SIZE,
1472 MOCK_APERTURE_START + PAGE_SIZE);
1473 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1474
1475 /* Reduce alignment */
1476 cmd.val64 = 1;
1477 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1478
1479 /* Confirm misalignment is rejected during alignment upgrade */
1480 test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1481 MOCK_APERTURE_START + PAGE_SIZE);
1482 cmd.val64 = 0;
1483 EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1484
1485 test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1486 test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1487 }
1488
1489 TEST_F(iommufd_ioas, copy_sweep)
1490 {
1491 struct iommu_ioas_copy copy_cmd = {
1492 .size = sizeof(copy_cmd),
1493 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1494 .src_ioas_id = self->ioas_id,
1495 .dst_iova = MOCK_APERTURE_START,
1496 .length = MOCK_PAGE_SIZE,
1497 };
1498 unsigned int dst_ioas_id;
1499 uint64_t last_iova;
1500 uint64_t iova;
1501
1502 test_ioctl_ioas_alloc(&dst_ioas_id);
1503 copy_cmd.dst_ioas_id = dst_ioas_id;
1504
1505 if (variant->mock_domains)
1506 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1507 else
1508 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1509
1510 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1511 MOCK_APERTURE_START);
1512
1513 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1514 iova += 511) {
1515 copy_cmd.src_iova = iova;
1516 if (iova < MOCK_APERTURE_START ||
1517 iova + copy_cmd.length - 1 > last_iova) {
1518 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1519 &copy_cmd));
1520 } else {
1521 ASSERT_EQ(0,
1522 ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1523 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1524 copy_cmd.length);
1525 }
1526 }
1527
1528 test_ioctl_destroy(dst_ioas_id);
1529 }
1530
1531 FIXTURE(iommufd_mock_domain)
1532 {
1533 int fd;
1534 uint32_t ioas_id;
1535 uint32_t hwpt_id;
1536 uint32_t hwpt_ids[2];
1537 uint32_t stdev_ids[2];
1538 uint32_t idev_ids[2];
1539 int mmap_flags;
1540 size_t mmap_buf_size;
1541 };
1542
1543 FIXTURE_VARIANT(iommufd_mock_domain)
1544 {
1545 unsigned int mock_domains;
1546 bool hugepages;
1547 bool file;
1548 };
1549
1550 FIXTURE_SETUP(iommufd_mock_domain)
1551 {
1552 unsigned int i;
1553
1554 self->fd = open("/dev/iommu", O_RDWR);
1555 ASSERT_NE(-1, self->fd);
1556 test_ioctl_ioas_alloc(&self->ioas_id);
1557
1558 ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1559
1560 for (i = 0; i != variant->mock_domains; i++) {
1561 test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1562 &self->hwpt_ids[i], &self->idev_ids[i]);
1563 test_cmd_dev_check_cache_all(self->idev_ids[0],
1564 IOMMU_TEST_DEV_CACHE_DEFAULT);
1565 }
1566 self->hwpt_id = self->hwpt_ids[0];
1567
1568 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1569 self->mmap_buf_size = PAGE_SIZE * 8;
1570 if (variant->hugepages) {
1571 /*
1572 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1573 * not available.
1574 */
1575 self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1576 self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1577 }
1578 }
1579
1580 FIXTURE_TEARDOWN(iommufd_mock_domain)
1581 {
1582 teardown_iommufd(self->fd, _metadata);
1583 }
1584
1585 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1586 {
1587 .mock_domains = 1,
1588 .hugepages = false,
1589 .file = false,
1590 };
1591
1592 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1593 {
1594 .mock_domains = 2,
1595 .hugepages = false,
1596 .file = false,
1597 };
1598
1599 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1600 {
1601 .mock_domains = 1,
1602 .hugepages = true,
1603 .file = false,
1604 };
1605
1606 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1607 {
1608 .mock_domains = 2,
1609 .hugepages = true,
1610 .file = false,
1611 };
1612
1613 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
1614 {
1615 .mock_domains = 1,
1616 .hugepages = false,
1617 .file = true,
1618 };
1619
1620 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
1621 {
1622 .mock_domains = 1,
1623 .hugepages = true,
1624 .file = true,
1625 };
1626
1627
1628 /* Have the kernel check that the user pages made it to the iommu_domain */
1629 #define check_mock_iova(_ptr, _iova, _length) \
1630 ({ \
1631 struct iommu_test_cmd check_map_cmd = { \
1632 .size = sizeof(check_map_cmd), \
1633 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \
1634 .id = self->hwpt_id, \
1635 .check_map = { .iova = _iova, \
1636 .length = _length, \
1637 .uptr = (uintptr_t)(_ptr) }, \
1638 }; \
1639 ASSERT_EQ(0, \
1640 ioctl(self->fd, \
1641 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1642 &check_map_cmd)); \
1643 if (self->hwpt_ids[1]) { \
1644 check_map_cmd.id = self->hwpt_ids[1]; \
1645 ASSERT_EQ(0, \
1646 ioctl(self->fd, \
1647 _IOMMU_TEST_CMD( \
1648 IOMMU_TEST_OP_MD_CHECK_MAP), \
1649 &check_map_cmd)); \
1650 } \
1651 })
1652
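/*
 * mmap-backed path: a one-page map must be visible in the domain, and any
 * hole in the VA range must make the map ioctl fail with EFAULT.
 */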
1653 static void
1654 test_basic_mmap(struct __test_metadata *_metadata,
1655 struct _test_data_iommufd_mock_domain *self,
1656 const struct _fixture_variant_iommufd_mock_domain *variant)
1657 {
1658 size_t buf_size = self->mmap_buf_size;
1659 uint8_t *buf;
1660 __u64 iova;
1661
1662 /* Simple one page map */
1663 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1664 check_mock_iova(buffer, iova, PAGE_SIZE);
1665
1666 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1667 0);
1668 ASSERT_NE(MAP_FAILED, buf);
1669
1670 /* EFAULT half way through mapping */
1671 ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1672 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1673
1674 /* EFAULT on first page */
1675 ASSERT_EQ(0, munmap(buf, buf_size / 2));
1676 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1677 }
1678
1679 static void
1680 test_basic_file(struct __test_metadata *_metadata,
1681 struct _test_data_iommufd_mock_domain *self,
1682 const struct _fixture_variant_iommufd_mock_domain *variant)
1683 {
1684 size_t buf_size = self->mmap_buf_size;
1685 uint8_t *buf;
1686 __u64 iova;
1687 int mfd_tmp;
1688 int prot = PROT_READ | PROT_WRITE;
1689
1690 /* Simple one page map */
1691 test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
1692 check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
1693
1694 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
1695 ASSERT_NE(MAP_FAILED, buf);
1696
1697 test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);
1698
1699 ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
1700 test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);
1701
1702 close(mfd_tmp);
1703 }
1704
1705 TEST_F(iommufd_mock_domain, basic)
1706 {
1707 if (variant->file)
1708 test_basic_file(_metadata, self, variant);
1709 else
1710 test_basic_mmap(_metadata, self, variant);
1711 }
1712
1713 TEST_F(iommufd_mock_domain, ro_unshare)
1714 {
1715 uint8_t *buf;
1716 __u64 iova;
1717 int fd;
1718
1719 fd = open("/proc/self/exe", O_RDONLY);
1720 ASSERT_NE(-1, fd);
1721
1722 buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1723 ASSERT_NE(MAP_FAILED, buf);
1724 close(fd);
1725
1726 /*
1727 * There have been lots of changes to the "unshare" mechanism in
1728 * get_user_pages(), make sure it works right. The write to the page
1729 * after we map it for reading should not change the assigned PFN.
1730 */
1731 ASSERT_EQ(0,
1732 _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1733 &iova, IOMMU_IOAS_MAP_READABLE));
1734 check_mock_iova(buf, iova, PAGE_SIZE);
1735 memset(buf, 1, PAGE_SIZE);
1736 check_mock_iova(buf, iova, PAGE_SIZE);
1737 ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1738 }
1739
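/*
 * Map every start/length combination at MOCK_PAGE_SIZE granularity, checking
 * the domain contents and that page refcounts are elevated only while mapped.
 */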
1740 TEST_F(iommufd_mock_domain, all_aligns)
1741 {
1742 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1743 MOCK_PAGE_SIZE;
1744 size_t buf_size = self->mmap_buf_size;
1745 unsigned int start;
1746 unsigned int end;
1747 uint8_t *buf;
1748 int prot = PROT_READ | PROT_WRITE;
1749 int mfd;
1750
1751 if (variant->file)
1752 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1753 else
1754 buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1755 ASSERT_NE(MAP_FAILED, buf);
1756 check_refs(buf, buf_size, 0);
1757
1758 /*
1759 * Map every combination of page size and alignment within a big region,
1760 * less for hugepage case as it takes so long to finish.
1761 */
1762 for (start = 0; start < buf_size; start += test_step) {
1763 if (variant->hugepages)
1764 end = buf_size;
1765 else
1766 end = start + MOCK_PAGE_SIZE;
1767 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1768 size_t length = end - start;
1769 __u64 iova;
1770
1771 if (variant->file) {
1772 test_ioctl_ioas_map_file(mfd, start, length,
1773 &iova);
1774 } else {
1775 test_ioctl_ioas_map(buf + start, length, &iova);
1776 }
1777 check_mock_iova(buf + start, iova, length);
1778 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1779 end / PAGE_SIZE * PAGE_SIZE -
1780 start / PAGE_SIZE * PAGE_SIZE,
1781 1);
1782
1783 test_ioctl_ioas_unmap(iova, length);
1784 }
1785 }
1786 check_refs(buf, buf_size, 0);
1787 ASSERT_EQ(0, munmap(buf, buf_size));
1788 if (variant->file)
1789 close(mfd);
1790 }
1791
1792 TEST_F(iommufd_mock_domain, all_aligns_copy)
1793 {
1794 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1795 MOCK_PAGE_SIZE;
1796 size_t buf_size = self->mmap_buf_size;
1797 unsigned int start;
1798 unsigned int end;
1799 uint8_t *buf;
1800 int prot = PROT_READ | PROT_WRITE;
1801 int mfd;
1802
1803 if (variant->file)
1804 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1805 else
1806 buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1807 ASSERT_NE(MAP_FAILED, buf);
1808 check_refs(buf, buf_size, 0);
1809
1810 /*
1811	 * Map every combination of page size and alignment within a big region;
1812	 * use fewer combinations for the hugepage case since it takes so long to finish.
1813 */
1814 for (start = 0; start < buf_size; start += test_step) {
1815 if (variant->hugepages)
1816 end = buf_size;
1817 else
1818 end = start + MOCK_PAGE_SIZE;
1819 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1820 size_t length = end - start;
1821 unsigned int old_id;
1822 uint32_t mock_stdev_id;
1823 __u64 iova;
1824
1825 if (variant->file) {
1826 test_ioctl_ioas_map_file(mfd, start, length,
1827 &iova);
1828 } else {
1829 test_ioctl_ioas_map(buf + start, length, &iova);
1830 }
1831
1832 /* Add and destroy a domain while the area exists */
1833 old_id = self->hwpt_ids[1];
1834 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1835 &self->hwpt_ids[1], NULL);
1836
1837 check_mock_iova(buf + start, iova, length);
1838 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1839 end / PAGE_SIZE * PAGE_SIZE -
1840 start / PAGE_SIZE * PAGE_SIZE,
1841 1);
1842
1843 test_ioctl_destroy(mock_stdev_id);
1844 self->hwpt_ids[1] = old_id;
1845
1846 test_ioctl_ioas_unmap(iova, length);
1847 }
1848 }
1849 check_refs(buf, buf_size, 0);
1850 ASSERT_EQ(0, munmap(buf, buf_size));
1851 if (variant->file)
1852 close(mfd);
1853 }
1854
1855 TEST_F(iommufd_mock_domain, user_copy)
1856 {
1857 void *buf = variant->file ? mfd_buffer : buffer;
1858 struct iommu_test_cmd access_cmd = {
1859 .size = sizeof(access_cmd),
1860 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1861 .access_pages = { .length = BUFFER_SIZE,
1862 .uptr = (uintptr_t)buf },
1863 };
1864 struct iommu_ioas_copy copy_cmd = {
1865 .size = sizeof(copy_cmd),
1866 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1867 .dst_ioas_id = self->ioas_id,
1868 .dst_iova = MOCK_APERTURE_START,
1869 .length = BUFFER_SIZE,
1870 };
1871 struct iommu_ioas_unmap unmap_cmd = {
1872 .size = sizeof(unmap_cmd),
1873 .ioas_id = self->ioas_id,
1874 .iova = MOCK_APERTURE_START,
1875 .length = BUFFER_SIZE,
1876 };
1877 unsigned int new_ioas_id, ioas_id;
1878
1879 /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1880 test_ioctl_ioas_alloc(&ioas_id);
1881 if (variant->file) {
1882 test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
1883					    &copy_cmd.src_iova);
1884 } else {
1885 test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
1886				       &copy_cmd.src_iova);
1887 }
1888 test_cmd_create_access(ioas_id, &access_cmd.id,
1889 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1890
1891 access_cmd.access_pages.iova = copy_cmd.src_iova;
1892 ASSERT_EQ(0,
1893 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1894 &access_cmd));
1895 copy_cmd.src_ioas_id = ioas_id;
1896	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1897 check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1898
1899 /* Now replace the ioas with a new one */
1900 test_ioctl_ioas_alloc(&new_ioas_id);
1901 if (variant->file) {
1902 test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
1903					    &copy_cmd.src_iova);
1904 } else {
1905 test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
1906				       &copy_cmd.src_iova);
1907 }
1908 test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1909
1910 /* Destroy the old ioas and cleanup copied mapping */
1911 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1912 test_ioctl_destroy(ioas_id);
1913
1914 /* Then run the same test again with the new ioas */
1915 access_cmd.access_pages.iova = copy_cmd.src_iova;
1916 ASSERT_EQ(0,
1917 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1918 &access_cmd));
1919 copy_cmd.src_ioas_id = new_ioas_id;
1920	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1921 check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1922
1923 test_cmd_destroy_access_pages(
1924 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1925 test_cmd_destroy_access(access_cmd.id);
1926
1927 test_ioctl_destroy(new_ioas_id);
1928 }
1929
1930 TEST_F(iommufd_mock_domain, replace)
1931 {
1932 uint32_t ioas_id;
1933
1934 test_ioctl_ioas_alloc(&ioas_id);
1935
1936 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1937
1938 /*
1939 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
1940	 * should get ENOENT when we try to use it.
1941 */
1942 if (variant->mock_domains == 1)
1943 test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1944 self->hwpt_ids[0]);
1945
1946 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1947 if (variant->mock_domains >= 2) {
1948 test_cmd_mock_domain_replace(self->stdev_ids[0],
1949 self->hwpt_ids[1]);
1950 test_cmd_mock_domain_replace(self->stdev_ids[0],
1951 self->hwpt_ids[1]);
1952 test_cmd_mock_domain_replace(self->stdev_ids[0],
1953 self->hwpt_ids[0]);
1954 }
1955
1956 test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
1957 test_ioctl_destroy(ioas_id);
1958 }
1959
1960 TEST_F(iommufd_mock_domain, alloc_hwpt)
1961 {
1962 int i;
1963
1964 for (i = 0; i != variant->mock_domains; i++) {
1965 uint32_t hwpt_id[2];
1966 uint32_t stddev_id;
1967
1968 test_err_hwpt_alloc(EOPNOTSUPP,
1969 self->idev_ids[i], self->ioas_id,
1970 ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
1971 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1972 0, &hwpt_id[0]);
1973 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1974 IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
1975
1976 /* Do a hw_pagetable rotation test */
1977 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
1978 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
1979 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
1980 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
1981 test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
1982 test_ioctl_destroy(hwpt_id[1]);
1983
1984 test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
1985 test_ioctl_destroy(stddev_id);
1986 test_ioctl_destroy(hwpt_id[0]);
1987 }
1988 }
1989
1990 FIXTURE(iommufd_dirty_tracking)
1991 {
1992 int fd;
1993 uint32_t ioas_id;
1994 uint32_t hwpt_id;
1995 uint32_t stdev_id;
1996 uint32_t idev_id;
1997 unsigned long page_size;
1998 unsigned long bitmap_size;
1999 void *bitmap;
2000 void *buffer;
2001 };
2002
2003 FIXTURE_VARIANT(iommufd_dirty_tracking)
2004 {
2005 unsigned long buffer_size;
2006 bool hugepages;
2007 };
2008
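/*
 * The setup below mmap()s a HUGEPAGE_SIZE-aligned test buffer of
 * variant->buffer_size bytes and allocates a dirty bitmap with one bit per
 * MOCK_PAGE_SIZE page, plus an extra PAGE_SIZE so the unaligned-bitmap
 * cases in the tests below have room to offset into it.
 */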
2009 FIXTURE_SETUP(iommufd_dirty_tracking)
2010 {
2011 unsigned long size;
2012 int mmap_flags;
2013 void *vrc;
2014 int rc;
2015
2016 if (variant->buffer_size < MOCK_PAGE_SIZE) {
2017 SKIP(return,
2018 "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
2019 variant->buffer_size, MOCK_PAGE_SIZE);
2020 }
2021
2022 self->fd = open("/dev/iommu", O_RDWR);
2023 ASSERT_NE(-1, self->fd);
2024
2025 rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
2026 if (rc || !self->buffer) {
2027 SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
2028 variant->buffer_size, rc);
2029 }
2030
2031 mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
2032 if (variant->hugepages) {
2033 /*
2034 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
2035 * not available.
2036 */
2037 mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
2038 }
2039 assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
2040 vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
2041 mmap_flags, -1, 0);
2042 assert(vrc == self->buffer);
2043
2044 self->page_size = MOCK_PAGE_SIZE;
2045 self->bitmap_size = variant->buffer_size / self->page_size;
2046
2047 /* Provision with an extra (PAGE_SIZE) for the unaligned case */
2048 size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
2049 rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
2050 assert(!rc);
2051 assert(self->bitmap);
2052 assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
2053
2054 test_ioctl_ioas_alloc(&self->ioas_id);
2055 /* Enable 1M mock IOMMU hugepages */
2056 if (variant->hugepages) {
2057 test_cmd_mock_domain_flags(self->ioas_id,
2058 MOCK_FLAGS_DEVICE_HUGE_IOVA,
2059 &self->stdev_id, &self->hwpt_id,
2060 &self->idev_id);
2061 } else {
2062 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
2063 &self->hwpt_id, &self->idev_id);
2064 }
2065 }
2066
2067 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
2068 {
2069 munmap(self->buffer, variant->buffer_size);
2070 munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
2071 teardown_iommufd(self->fd, _metadata);
2072 }
2073
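/*
 * The buffer sizes below are chosen so the resulting dirty bitmaps exercise
 * different index widths (u8, u32, u64), a trailing partial word, and
 * multi-page bitmaps, both with and without mock IOMMU hugepages.
 */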
2074 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
2075 {
2076	/* half of a u8 index bitmap */
2077 .buffer_size = 8UL * 1024UL,
2078 };
2079
2080 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
2081 {
2082 /* one u8 index bitmap */
2083 .buffer_size = 16UL * 1024UL,
2084 };
2085
2086 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
2087 {
2088 /* one u32 index bitmap */
2089 .buffer_size = 64UL * 1024UL,
2090 };
2091
2092 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
2093 {
2094 /* one u64 index bitmap */
2095 .buffer_size = 128UL * 1024UL,
2096 };
2097
2098 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
2099 {
2100	/* two u64 indexes and a trailing partial bitmap word */
2101 .buffer_size = 320UL * 1024UL,
2102 };
2103
2104 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
2105 {
2106 /* 4K bitmap (64M IOVA range) */
2107 .buffer_size = 64UL * 1024UL * 1024UL,
2108 };
2109
2110 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
2111 {
2112 /* 4K bitmap (64M IOVA range) */
2113 .buffer_size = 64UL * 1024UL * 1024UL,
2114 .hugepages = true,
2115 };
2116
2117 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
2118 {
2119 /* 8K bitmap (128M IOVA range) */
2120 .buffer_size = 128UL * 1024UL * 1024UL,
2121 };
2122
2123 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
2124 {
2125 /* 8K bitmap (128M IOVA range) */
2126 .buffer_size = 128UL * 1024UL * 1024UL,
2127 .hugepages = true,
2128 };
2129
2130 TEST_F(iommufd_dirty_tracking, enforce_dirty)
2131 {
2132 uint32_t ioas_id, stddev_id, idev_id;
2133 uint32_t hwpt_id, _hwpt_id;
2134 uint32_t dev_flags;
2135
2136 /* Regular case */
2137 dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
2138 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2139 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2140 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2141 test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
2142 NULL);
2143 test_ioctl_destroy(stddev_id);
2144 test_ioctl_destroy(hwpt_id);
2145
2146 /* IOMMU device does not support dirty tracking */
2147 test_ioctl_ioas_alloc(&ioas_id);
2148 test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
2149 &idev_id);
2150 test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
2151 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2152 test_ioctl_destroy(stddev_id);
2153 }
2154
2155 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
2156 {
2157 uint32_t stddev_id;
2158 uint32_t hwpt_id;
2159
2160 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2161 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2162 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2163 test_cmd_set_dirty_tracking(hwpt_id, true);
2164 test_cmd_set_dirty_tracking(hwpt_id, false);
2165
2166 test_ioctl_destroy(stddev_id);
2167 test_ioctl_destroy(hwpt_id);
2168 }
2169
2170 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
2171 {
2172 uint32_t caps = 0;
2173 uint32_t stddev_id;
2174 uint32_t hwpt_id;
2175
2176 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
2177 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2178 test_cmd_get_hw_capabilities(self->idev_id, caps,
2179 IOMMU_HW_CAP_DIRTY_TRACKING);
2180 ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
2181 caps & IOMMU_HW_CAP_DIRTY_TRACKING);
2182
2183 test_ioctl_destroy(stddev_id);
2184 test_ioctl_destroy(hwpt_id);
2185 }
2186
2187 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
2188 {
2189 uint32_t page_size = MOCK_PAGE_SIZE;
2190 uint32_t hwpt_id;
2191 uint32_t ioas_id;
2192
2193 if (variant->hugepages)
2194 page_size = MOCK_HUGE_PAGE_SIZE;
2195
2196 test_ioctl_ioas_alloc(&ioas_id);
2197 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2198 variant->buffer_size, MOCK_APERTURE_START);
2199
2200 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
2201 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2202
2203 test_cmd_set_dirty_tracking(hwpt_id, true);
2204
2205 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2206 MOCK_APERTURE_START, self->page_size, page_size,
2207 self->bitmap, self->bitmap_size, 0, _metadata);
2208
2209 /* PAGE_SIZE unaligned bitmap */
2210 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2211 MOCK_APERTURE_START, self->page_size, page_size,
2212 self->bitmap + MOCK_PAGE_SIZE,
2213 self->bitmap_size, 0, _metadata);
2214
2215 /* u64 unaligned bitmap */
2216 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2217 MOCK_APERTURE_START, self->page_size, page_size,
2218 self->bitmap + 0xff1, self->bitmap_size, 0,
2219 _metadata);
2220
2221 test_ioctl_destroy(hwpt_id);
2222 }
2223
2224 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
2225 {
2226 uint32_t page_size = MOCK_PAGE_SIZE;
2227 uint32_t hwpt_id;
2228 uint32_t ioas_id;
2229
2230 if (variant->hugepages)
2231 page_size = MOCK_HUGE_PAGE_SIZE;
2232
2233 test_ioctl_ioas_alloc(&ioas_id);
2234 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2235 variant->buffer_size, MOCK_APERTURE_START);
2236
2237 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
2238 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2239
2240 test_cmd_set_dirty_tracking(hwpt_id, true);
2241
2242 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2243 MOCK_APERTURE_START, self->page_size, page_size,
2244 self->bitmap, self->bitmap_size,
2245 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2246 _metadata);
2247
2248 /* Unaligned bitmap */
2249 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2250 MOCK_APERTURE_START, self->page_size, page_size,
2251 self->bitmap + MOCK_PAGE_SIZE,
2252 self->bitmap_size,
2253 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2254 _metadata);
2255
2256 /* u64 unaligned bitmap */
2257 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2258 MOCK_APERTURE_START, self->page_size, page_size,
2259 self->bitmap + 0xff1, self->bitmap_size,
2260 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2261 _metadata);
2262
2263 test_ioctl_destroy(hwpt_id);
2264 }
2265
2266 /* VFIO compatibility IOCTLs */
2267
2268 TEST_F(iommufd, simple_ioctls)
2269 {
2270 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
2271 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
2272 }
2273
2274 TEST_F(iommufd, unmap_cmd)
2275 {
2276 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2277 .iova = MOCK_APERTURE_START,
2278 .size = PAGE_SIZE,
2279 };
2280
2281 unmap_cmd.argsz = 1;
2282 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2283
2284 unmap_cmd.argsz = sizeof(unmap_cmd);
2285 unmap_cmd.flags = 1 << 31;
2286 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2287
2288 unmap_cmd.flags = 0;
2289 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2290 }
2291
2292 TEST_F(iommufd, map_cmd)
2293 {
2294 struct vfio_iommu_type1_dma_map map_cmd = {
2295 .iova = MOCK_APERTURE_START,
2296 .size = PAGE_SIZE,
2297 .vaddr = (__u64)buffer,
2298 };
2299
2300 map_cmd.argsz = 1;
2301 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2302
2303 map_cmd.argsz = sizeof(map_cmd);
2304 map_cmd.flags = 1 << 31;
2305 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2306
2307 /* Requires a domain to be attached */
2308 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2309 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2310 }
2311
2312 TEST_F(iommufd, info_cmd)
2313 {
2314 struct vfio_iommu_type1_info info_cmd = {};
2315
2316 /* Invalid argsz */
2317 info_cmd.argsz = 1;
2318 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2319
2320 info_cmd.argsz = sizeof(info_cmd);
2321 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2322 }
2323
2324 TEST_F(iommufd, set_iommu_cmd)
2325 {
2326 /* Requires a domain to be attached */
2327 EXPECT_ERRNO(ENODEV,
2328 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2329 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2330 }
2331
2332 TEST_F(iommufd, vfio_ioas)
2333 {
2334 struct iommu_vfio_ioas vfio_ioas_cmd = {
2335 .size = sizeof(vfio_ioas_cmd),
2336 .op = IOMMU_VFIO_IOAS_GET,
2337 };
2338 __u32 ioas_id;
2339
2340 /* ENODEV if there is no compat ioas */
2341 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2342
2343 /* Invalid id for set */
2344 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2345 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2346
2347	/* Valid id for set */
2348 test_ioctl_ioas_alloc(&ioas_id);
2349 vfio_ioas_cmd.ioas_id = ioas_id;
2350 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2351
2352 /* Same id comes back from get */
2353 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2354 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2355 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2356
2357 /* Clear works */
2358 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2359 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2360 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2361 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2362 }
2363
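/*
 * The vfio_compat_mock_domain fixture allocates an IOAS with a mock domain
 * attached, sets it as the VFIO compat IOAS, and then selects the type1 or
 * type1v2 compat personality according to the variant.
 */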
2364 FIXTURE(vfio_compat_mock_domain)
2365 {
2366 int fd;
2367 uint32_t ioas_id;
2368 };
2369
2370 FIXTURE_VARIANT(vfio_compat_mock_domain)
2371 {
2372 unsigned int version;
2373 };
2374
2375 FIXTURE_SETUP(vfio_compat_mock_domain)
2376 {
2377 struct iommu_vfio_ioas vfio_ioas_cmd = {
2378 .size = sizeof(vfio_ioas_cmd),
2379 .op = IOMMU_VFIO_IOAS_SET,
2380 };
2381
2382 self->fd = open("/dev/iommu", O_RDWR);
2383 ASSERT_NE(-1, self->fd);
2384
2385 /* Create what VFIO would consider a group */
2386 test_ioctl_ioas_alloc(&self->ioas_id);
2387 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2388
2389 /* Attach it to the vfio compat */
2390 vfio_ioas_cmd.ioas_id = self->ioas_id;
2391 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2392 ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2393 }
2394
2395 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2396 {
2397 teardown_iommufd(self->fd, _metadata);
2398 }
2399
2400 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2401 {
2402 .version = VFIO_TYPE1v2_IOMMU,
2403 };
2404
2405 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2406 {
2407 .version = VFIO_TYPE1_IOMMU,
2408 };
2409
2410 TEST_F(vfio_compat_mock_domain, simple_close)
2411 {
2412 }
2413
2414 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2415 {
2416 struct iommu_option cmd = {
2417 .size = sizeof(cmd),
2418 .option_id = IOMMU_OPTION_HUGE_PAGES,
2419 .op = IOMMU_OPTION_OP_GET,
2420 .val64 = 3,
2421 .object_id = self->ioas_id,
2422 };
2423
2424 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2425 if (variant->version == VFIO_TYPE1_IOMMU) {
2426 ASSERT_EQ(0, cmd.val64);
2427 } else {
2428 ASSERT_EQ(1, cmd.val64);
2429 }
2430 }
2431
2432 /*
2433 * Execute an ioctl command stored in buffer and check that the result does not
2434 * overflow memory.
2435 */
2436 static bool is_filled(const void *buf, uint8_t c, size_t len)
2437 {
2438 const uint8_t *cbuf = buf;
2439
2440 for (; len; cbuf++, len--)
2441 if (*cbuf != c)
2442 return false;
2443 return true;
2444 }
2445
2446 #define ioctl_check_buf(fd, cmd) \
2447 ({ \
2448 size_t _cmd_len = *(__u32 *)buffer; \
2449 \
2450 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2451 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
2452 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
2453 BUFFER_SIZE - _cmd_len)); \
2454 })
2455
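/*
 * Walk the vfio_info_cap chain returned by VFIO_IOMMU_GET_INFO, checking
 * that each capability fits within argsz and that the IOVA range and DMA
 * avail capabilities carry the expected mock aperture values.
 */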
2456 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2457 struct vfio_iommu_type1_info *info_cmd)
2458 {
2459 const struct vfio_info_cap_header *cap;
2460
2461 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2462 cap = buffer + info_cmd->cap_offset;
2463 while (true) {
2464 size_t cap_size;
2465
2466 if (cap->next)
2467 cap_size = (buffer + cap->next) - (void *)cap;
2468 else
2469 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2470
2471 switch (cap->id) {
2472 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2473 struct vfio_iommu_type1_info_cap_iova_range *data =
2474 (void *)cap;
2475
2476 ASSERT_EQ(1, data->header.version);
2477 ASSERT_EQ(1, data->nr_iovas);
2478 EXPECT_EQ(MOCK_APERTURE_START,
2479 data->iova_ranges[0].start);
2480 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2481 break;
2482 }
2483 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2484 struct vfio_iommu_type1_info_dma_avail *data =
2485 (void *)cap;
2486
2487 ASSERT_EQ(1, data->header.version);
2488 ASSERT_EQ(sizeof(*data), cap_size);
2489 break;
2490 }
2491 default:
2492 ASSERT_EQ(false, true);
2493 break;
2494 }
2495 if (!cap->next)
2496 break;
2497
2498 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2499 ASSERT_GE(buffer + cap->next, (void *)cap);
2500 cap = buffer + cap->next;
2501 }
2502 }
2503
2504 TEST_F(vfio_compat_mock_domain, get_info)
2505 {
2506 struct vfio_iommu_type1_info *info_cmd = buffer;
2507 unsigned int i;
2508 size_t caplen;
2509
2510 /* Pre-cap ABI */
2511 *info_cmd = (struct vfio_iommu_type1_info){
2512 .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2513 };
2514 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2515 ASSERT_NE(0, info_cmd->iova_pgsizes);
2516 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2517 info_cmd->flags);
2518
2519 /* Read the cap chain size */
2520 *info_cmd = (struct vfio_iommu_type1_info){
2521 .argsz = sizeof(*info_cmd),
2522 };
2523 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2524 ASSERT_NE(0, info_cmd->iova_pgsizes);
2525 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2526 info_cmd->flags);
2527 ASSERT_EQ(0, info_cmd->cap_offset);
2528 ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2529
2530	/* Read the caps; the kernel should never produce corrupted caps */
2531 caplen = info_cmd->argsz;
2532 for (i = sizeof(*info_cmd); i < caplen; i++) {
2533 *info_cmd = (struct vfio_iommu_type1_info){
2534 .argsz = i,
2535 };
2536 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2537 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2538 info_cmd->flags);
2539 if (!info_cmd->cap_offset)
2540 continue;
2541 check_vfio_info_cap_chain(_metadata, info_cmd);
2542 }
2543 }
2544
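/* Randomly permute the IOVA array so the unmaps below run out of order */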
2545 static void shuffle_array(unsigned long *array, size_t nelms)
2546 {
2547 unsigned int i;
2548
2549 /* Shuffle */
2550 for (i = 0; i != nelms; i++) {
2551 unsigned long tmp = array[i];
2552 unsigned int other = rand() % (nelms - i);
2553
2554 array[i] = array[other];
2555 array[other] = tmp;
2556 }
2557 }
2558
2559 TEST_F(vfio_compat_mock_domain, map)
2560 {
2561 struct vfio_iommu_type1_dma_map map_cmd = {
2562 .argsz = sizeof(map_cmd),
2563 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2564 .vaddr = (uintptr_t)buffer,
2565 .size = BUFFER_SIZE,
2566 .iova = MOCK_APERTURE_START,
2567 };
2568 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2569 .argsz = sizeof(unmap_cmd),
2570 .size = BUFFER_SIZE,
2571 .iova = MOCK_APERTURE_START,
2572 };
2573 unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2574 unsigned int i;
2575
2576 /* Simple map/unmap */
2577 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2578 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2579 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2580
2581 /* UNMAP_FLAG_ALL requires 0 iova/size */
2582 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2583 unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2584 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2585
2586 unmap_cmd.iova = 0;
2587 unmap_cmd.size = 0;
2588 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2589 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2590
2591 /* Small pages */
2592 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2593 map_cmd.iova = pages_iova[i] =
2594 MOCK_APERTURE_START + i * PAGE_SIZE;
2595 map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2596 map_cmd.size = PAGE_SIZE;
2597 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2598 }
2599 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2600
2601 unmap_cmd.flags = 0;
2602 unmap_cmd.size = PAGE_SIZE;
2603 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2604 unmap_cmd.iova = pages_iova[i];
2605 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2606 }
2607 }
2608
2609 TEST_F(vfio_compat_mock_domain, huge_map)
2610 {
2611 size_t buf_size = HUGEPAGE_SIZE * 2;
2612 struct vfio_iommu_type1_dma_map map_cmd = {
2613 .argsz = sizeof(map_cmd),
2614 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2615 .size = buf_size,
2616 .iova = MOCK_APERTURE_START,
2617 };
2618 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2619 .argsz = sizeof(unmap_cmd),
2620 };
2621 unsigned long pages_iova[16];
2622 unsigned int i;
2623 void *buf;
2624
2625 /* Test huge pages and splitting */
2626 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2627 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2628 0);
2629 ASSERT_NE(MAP_FAILED, buf);
2630 map_cmd.vaddr = (uintptr_t)buf;
2631 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2632
2633 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2634 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2635 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2636 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2637
2638 /* type1 mode can cut up larger mappings, type1v2 always fails */
2639 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2640 unmap_cmd.iova = pages_iova[i];
2641 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2642 if (variant->version == VFIO_TYPE1_IOMMU) {
2643 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2644 &unmap_cmd));
2645 } else {
2646 EXPECT_ERRNO(ENOENT,
2647 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2648 &unmap_cmd));
2649 }
2650 }
2651 }
2652
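/*
 * For the mock_viommu variant the setup below builds the full nesting
 * chain: a mock device, a nesting parent hwpt, a selftest vIOMMU on top of
 * it, and a nested hwpt allocated against that vIOMMU.
 */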
2653 FIXTURE(iommufd_viommu)
2654 {
2655 int fd;
2656 uint32_t ioas_id;
2657 uint32_t stdev_id;
2658 uint32_t hwpt_id;
2659 uint32_t nested_hwpt_id;
2660 uint32_t device_id;
2661 uint32_t viommu_id;
2662 };
2663
2664 FIXTURE_VARIANT(iommufd_viommu)
2665 {
2666 unsigned int viommu;
2667 };
2668
2669 FIXTURE_SETUP(iommufd_viommu)
2670 {
2671 self->fd = open("/dev/iommu", O_RDWR);
2672 ASSERT_NE(-1, self->fd);
2673 test_ioctl_ioas_alloc(&self->ioas_id);
2674 test_ioctl_set_default_memory_limit();
2675
2676 if (variant->viommu) {
2677 struct iommu_hwpt_selftest data = {
2678 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2679 };
2680
2681 test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
2682 &self->device_id);
2683
2684 /* Allocate a nesting parent hwpt */
2685 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
2686 IOMMU_HWPT_ALLOC_NEST_PARENT,
2687 &self->hwpt_id);
2688
2689 /* Allocate a vIOMMU taking refcount of the parent hwpt */
2690 test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2691 IOMMU_VIOMMU_TYPE_SELFTEST,
2692 &self->viommu_id);
2693
2694 /* Allocate a regular nested hwpt */
2695 test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
2696 &self->nested_hwpt_id,
2697 IOMMU_HWPT_DATA_SELFTEST, &data,
2698 sizeof(data));
2699 }
2700 }
2701
2702 FIXTURE_TEARDOWN(iommufd_viommu)
2703 {
2704 teardown_iommufd(self->fd, _metadata);
2705 }
2706
2707 FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
2708 {
2709 .viommu = 0,
2710 };
2711
2712 FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
2713 {
2714 .viommu = 1,
2715 };
2716
2717 TEST_F(iommufd_viommu, viommu_auto_destroy)
2718 {
2719 }
2720
2721 TEST_F(iommufd_viommu, viommu_negative_tests)
2722 {
2723 uint32_t device_id = self->device_id;
2724 uint32_t ioas_id = self->ioas_id;
2725 uint32_t hwpt_id;
2726
2727 if (self->device_id) {
2728 /* Negative test -- invalid hwpt (hwpt_id=0) */
2729 test_err_viommu_alloc(ENOENT, device_id, 0,
2730 IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
2731
2732 /* Negative test -- not a nesting parent hwpt */
2733 test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
2734 test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
2735 IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
2736 test_ioctl_destroy(hwpt_id);
2737
2738 /* Negative test -- unsupported viommu type */
2739 test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
2740 0xdead, NULL);
2741 EXPECT_ERRNO(EBUSY,
2742 _test_ioctl_destroy(self->fd, self->hwpt_id));
2743 EXPECT_ERRNO(EBUSY,
2744 _test_ioctl_destroy(self->fd, self->viommu_id));
2745 } else {
2746 test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
2747 IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
2748 }
2749 }
2750
2751 TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
2752 {
2753 struct iommu_hwpt_selftest data = {
2754 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2755 };
2756 uint32_t viommu_id = self->viommu_id;
2757 uint32_t dev_id = self->device_id;
2758 uint32_t iopf_hwpt_id;
2759 uint32_t fault_id;
2760 uint32_t fault_fd;
2761 uint32_t vdev_id;
2762
2763 if (self->device_id) {
2764 test_ioctl_fault_alloc(&fault_id, &fault_fd);
2765 test_err_hwpt_alloc_iopf(
2766 ENOENT, dev_id, viommu_id, UINT32_MAX,
2767 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2768 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2769 test_err_hwpt_alloc_iopf(
2770 EOPNOTSUPP, dev_id, viommu_id, fault_id,
2771 IOMMU_HWPT_FAULT_ID_VALID | (1 << 31), &iopf_hwpt_id,
2772 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2773 test_cmd_hwpt_alloc_iopf(
2774 dev_id, viommu_id, fault_id, IOMMU_HWPT_FAULT_ID_VALID,
2775 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
2776 sizeof(data));
2777
2778 /* Must allocate vdevice before attaching to a nested hwpt */
2779 test_err_mock_domain_replace(ENOENT, self->stdev_id,
2780 iopf_hwpt_id);
2781 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2782 test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
2783 EXPECT_ERRNO(EBUSY,
2784 _test_ioctl_destroy(self->fd, iopf_hwpt_id));
2785 test_cmd_trigger_iopf(dev_id, fault_fd);
2786
2787 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2788 test_ioctl_destroy(iopf_hwpt_id);
2789 close(fault_fd);
2790 test_ioctl_destroy(fault_id);
2791 }
2792 }
2793
2794 TEST_F(iommufd_viommu, vdevice_alloc)
2795 {
2796 uint32_t viommu_id = self->viommu_id;
2797 uint32_t dev_id = self->device_id;
2798 uint32_t vdev_id = 0;
2799 uint32_t veventq_id;
2800 uint32_t veventq_fd;
2801 int prev_seq = -1;
2802
2803 if (dev_id) {
2804 /* Must allocate vdevice before attaching to a nested hwpt */
2805 test_err_mock_domain_replace(ENOENT, self->stdev_id,
2806 self->nested_hwpt_id);
2807
2808 /* Allocate a vEVENTQ with veventq_depth=2 */
2809 test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
2810 &veventq_id, &veventq_fd);
2811 test_err_veventq_alloc(EEXIST, viommu_id,
2812 IOMMU_VEVENTQ_TYPE_SELFTEST, NULL, NULL);
2813 /* Set vdev_id to 0x99, unset it, and set to 0x88 */
2814 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2815 test_cmd_mock_domain_replace(self->stdev_id,
2816 self->nested_hwpt_id);
2817 test_cmd_trigger_vevents(dev_id, 1);
2818 test_cmd_read_vevents(veventq_fd, 1, 0x99, &prev_seq);
2819 test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
2820 &vdev_id);
2821 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2822 test_ioctl_destroy(vdev_id);
2823
2824 /* Try again with 0x88 */
2825 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
2826 test_cmd_mock_domain_replace(self->stdev_id,
2827 self->nested_hwpt_id);
2828 /* Trigger an overflow with three events */
2829 test_cmd_trigger_vevents(dev_id, 3);
2830 test_err_read_vevents(EOVERFLOW, veventq_fd, 3, 0x88,
2831 &prev_seq);
2832 /* Overflow must be gone after the previous reads */
2833 test_cmd_trigger_vevents(dev_id, 1);
2834 test_cmd_read_vevents(veventq_fd, 1, 0x88, &prev_seq);
2835 close(veventq_fd);
2836 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2837 test_ioctl_destroy(vdev_id);
2838 test_ioctl_destroy(veventq_id);
2839 } else {
2840 test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
2841 }
2842 }
2843
2844 TEST_F(iommufd_viommu, vdevice_cache)
2845 {
2846 struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
2847 uint32_t viommu_id = self->viommu_id;
2848 uint32_t dev_id = self->device_id;
2849 uint32_t vdev_id = 0;
2850 uint32_t num_inv;
2851
2852 if (dev_id) {
2853 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2854
2855 test_cmd_dev_check_cache_all(dev_id,
2856 IOMMU_TEST_DEV_CACHE_DEFAULT);
2857
2858 /* Check data_type by passing zero-length array */
2859 num_inv = 0;
2860 test_cmd_viommu_invalidate(viommu_id, inv_reqs,
2861 sizeof(*inv_reqs), &num_inv);
2862 assert(!num_inv);
2863
2864 /* Negative test: Invalid data_type */
2865 num_inv = 1;
2866 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2867 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
2868 sizeof(*inv_reqs), &num_inv);
2869 assert(!num_inv);
2870
2871 /* Negative test: structure size sanity */
2872 num_inv = 1;
2873 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2874 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2875 sizeof(*inv_reqs) + 1, &num_inv);
2876 assert(!num_inv);
2877
2878 num_inv = 1;
2879 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2880 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2881 1, &num_inv);
2882 assert(!num_inv);
2883
2884 /* Negative test: invalid flag is passed */
2885 num_inv = 1;
2886 inv_reqs[0].flags = 0xffffffff;
2887 inv_reqs[0].vdev_id = 0x99;
2888 test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
2889 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2890 sizeof(*inv_reqs), &num_inv);
2891 assert(!num_inv);
2892
2893 /* Negative test: invalid data_uptr when array is not empty */
2894 num_inv = 1;
2895 inv_reqs[0].flags = 0;
2896 inv_reqs[0].vdev_id = 0x99;
2897 test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
2898 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2899 sizeof(*inv_reqs), &num_inv);
2900 assert(!num_inv);
2901
2902 /* Negative test: invalid entry_len when array is not empty */
2903 num_inv = 1;
2904 inv_reqs[0].flags = 0;
2905 inv_reqs[0].vdev_id = 0x99;
2906 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2907 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2908 0, &num_inv);
2909 assert(!num_inv);
2910
2911 /* Negative test: invalid cache_id */
2912 num_inv = 1;
2913 inv_reqs[0].flags = 0;
2914 inv_reqs[0].vdev_id = 0x99;
2915 inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
2916 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2917 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2918 sizeof(*inv_reqs), &num_inv);
2919 assert(!num_inv);
2920
2921 /* Negative test: invalid vdev_id */
2922 num_inv = 1;
2923 inv_reqs[0].flags = 0;
2924 inv_reqs[0].vdev_id = 0x9;
2925 inv_reqs[0].cache_id = 0;
2926 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2927 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2928 sizeof(*inv_reqs), &num_inv);
2929 assert(!num_inv);
2930
2931 /*
2932 * Invalidate the 1st cache entry but fail the 2nd request
2933 * due to invalid flags configuration in the 2nd request.
2934 */
2935 num_inv = 2;
2936 inv_reqs[0].flags = 0;
2937 inv_reqs[0].vdev_id = 0x99;
2938 inv_reqs[0].cache_id = 0;
2939 inv_reqs[1].flags = 0xffffffff;
2940 inv_reqs[1].vdev_id = 0x99;
2941 inv_reqs[1].cache_id = 1;
2942 test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
2943 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2944 sizeof(*inv_reqs), &num_inv);
2945 assert(num_inv == 1);
2946 test_cmd_dev_check_cache(dev_id, 0, 0);
2947 test_cmd_dev_check_cache(dev_id, 1,
2948 IOMMU_TEST_DEV_CACHE_DEFAULT);
2949 test_cmd_dev_check_cache(dev_id, 2,
2950 IOMMU_TEST_DEV_CACHE_DEFAULT);
2951 test_cmd_dev_check_cache(dev_id, 3,
2952 IOMMU_TEST_DEV_CACHE_DEFAULT);
2953
2954 /*
2955 * Invalidate the 1st cache entry but fail the 2nd request
2956 * due to invalid cache_id configuration in the 2nd request.
2957 */
2958 num_inv = 2;
2959 inv_reqs[0].flags = 0;
2960 inv_reqs[0].vdev_id = 0x99;
2961 inv_reqs[0].cache_id = 0;
2962 inv_reqs[1].flags = 0;
2963 inv_reqs[1].vdev_id = 0x99;
2964 inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
2965 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2966 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2967 sizeof(*inv_reqs), &num_inv);
2968 assert(num_inv == 1);
2969 test_cmd_dev_check_cache(dev_id, 0, 0);
2970 test_cmd_dev_check_cache(dev_id, 1,
2971 IOMMU_TEST_DEV_CACHE_DEFAULT);
2972 test_cmd_dev_check_cache(dev_id, 2,
2973 IOMMU_TEST_DEV_CACHE_DEFAULT);
2974 test_cmd_dev_check_cache(dev_id, 3,
2975 IOMMU_TEST_DEV_CACHE_DEFAULT);
2976
2977 /* Invalidate the 2nd cache entry and verify */
2978 num_inv = 1;
2979 inv_reqs[0].flags = 0;
2980 inv_reqs[0].vdev_id = 0x99;
2981 inv_reqs[0].cache_id = 1;
2982 test_cmd_viommu_invalidate(viommu_id, inv_reqs,
2983 sizeof(*inv_reqs), &num_inv);
2984 assert(num_inv == 1);
2985 test_cmd_dev_check_cache(dev_id, 0, 0);
2986 test_cmd_dev_check_cache(dev_id, 1, 0);
2987 test_cmd_dev_check_cache(dev_id, 2,
2988 IOMMU_TEST_DEV_CACHE_DEFAULT);
2989 test_cmd_dev_check_cache(dev_id, 3,
2990 IOMMU_TEST_DEV_CACHE_DEFAULT);
2991
2992 /* Invalidate the 3rd and 4th cache entries and verify */
2993 num_inv = 2;
2994 inv_reqs[0].flags = 0;
2995 inv_reqs[0].vdev_id = 0x99;
2996 inv_reqs[0].cache_id = 2;
2997 inv_reqs[1].flags = 0;
2998 inv_reqs[1].vdev_id = 0x99;
2999 inv_reqs[1].cache_id = 3;
3000 test_cmd_viommu_invalidate(viommu_id, inv_reqs,
3001 sizeof(*inv_reqs), &num_inv);
3002 assert(num_inv == 2);
3003 test_cmd_dev_check_cache_all(dev_id, 0);
3004
3005		/* Invalidate all cache entries for the device and verify */
3006 num_inv = 1;
3007 inv_reqs[0].vdev_id = 0x99;
3008 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
3009 test_cmd_viommu_invalidate(viommu_id, inv_reqs,
3010 sizeof(*inv_reqs), &num_inv);
3011 assert(num_inv == 1);
3012 test_cmd_dev_check_cache_all(dev_id, 0);
3013 test_ioctl_destroy(vdev_id);
3014 }
3015 }
3016
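/*
 * The pasid fixture always creates one PASID-capable mock device; for the
 * no_pasid variant it additionally creates a non-PASID-capable device so
 * the tests can cover the negative attach paths.
 */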
3017 FIXTURE(iommufd_device_pasid)
3018 {
3019 int fd;
3020 uint32_t ioas_id;
3021 uint32_t hwpt_id;
3022 uint32_t stdev_id;
3023 uint32_t device_id;
3024 uint32_t no_pasid_stdev_id;
3025 uint32_t no_pasid_device_id;
3026 };
3027
3028 FIXTURE_VARIANT(iommufd_device_pasid)
3029 {
3030 bool pasid_capable;
3031 };
3032
3033 FIXTURE_SETUP(iommufd_device_pasid)
3034 {
3035 self->fd = open("/dev/iommu", O_RDWR);
3036 ASSERT_NE(-1, self->fd);
3037 test_ioctl_ioas_alloc(&self->ioas_id);
3038
3039 test_cmd_mock_domain_flags(self->ioas_id,
3040 MOCK_FLAGS_DEVICE_PASID,
3041 &self->stdev_id, &self->hwpt_id,
3042 &self->device_id);
3043 if (!variant->pasid_capable)
3044 test_cmd_mock_domain_flags(self->ioas_id, 0,
3045 &self->no_pasid_stdev_id, NULL,
3046 &self->no_pasid_device_id);
3047 }
3048
3049 FIXTURE_TEARDOWN(iommufd_device_pasid)
3050 {
3051 teardown_iommufd(self->fd, _metadata);
3052 }
3053
3054 FIXTURE_VARIANT_ADD(iommufd_device_pasid, no_pasid)
3055 {
3056 .pasid_capable = false,
3057 };
3058
3059 FIXTURE_VARIANT_ADD(iommufd_device_pasid, has_pasid)
3060 {
3061 .pasid_capable = true,
3062 };
3063
3064 TEST_F(iommufd_device_pasid, pasid_attach)
3065 {
3066 struct iommu_hwpt_selftest data = {
3067 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
3068 };
3069 uint32_t nested_hwpt_id[3] = {};
3070 uint32_t parent_hwpt_id = 0;
3071 uint32_t fault_id, fault_fd;
3072 uint32_t s2_hwpt_id = 0;
3073 uint32_t iopf_hwpt_id;
3074 uint32_t pasid = 100;
3075 uint32_t viommu_id;
3076
3077 /*
3078	 * Negative test: detach a pasid that was never attached. This is
3079	 * not expected usage, but it should not result in a failure.
3080 */
3081 test_cmd_pasid_detach(pasid);
3082
3083 /* Allocate two nested hwpts sharing one common parent hwpt */
3084 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3085 IOMMU_HWPT_ALLOC_NEST_PARENT,
3086 &parent_hwpt_id);
3087 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3088 IOMMU_HWPT_ALLOC_PASID,
3089 &nested_hwpt_id[0],
3090 IOMMU_HWPT_DATA_SELFTEST,
3091 &data, sizeof(data));
3092 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3093 IOMMU_HWPT_ALLOC_PASID,
3094 &nested_hwpt_id[1],
3095 IOMMU_HWPT_DATA_SELFTEST,
3096 &data, sizeof(data));
3097
3098 /* Fault related preparation */
3099 test_ioctl_fault_alloc(&fault_id, &fault_fd);
3100 test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
3101 IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID,
3102 &iopf_hwpt_id,
3103 IOMMU_HWPT_DATA_SELFTEST, &data,
3104 sizeof(data));
3105
3106 /* Allocate a regular nested hwpt based on viommu */
3107 test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
3108 IOMMU_VIOMMU_TYPE_SELFTEST,
3109 &viommu_id);
3110 test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
3111 IOMMU_HWPT_ALLOC_PASID,
3112 &nested_hwpt_id[2],
3113 IOMMU_HWPT_DATA_SELFTEST, &data,
3114 sizeof(data));
3115
3116 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3117 IOMMU_HWPT_ALLOC_PASID,
3118 &s2_hwpt_id);
3119
3120 /* Attach RID to non-pasid compat domain, */
3121 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
3122 /* then attach to pasid should fail */
3123 test_err_pasid_attach(EINVAL, pasid, s2_hwpt_id);
3124
3125 /* Attach RID to pasid compat domain, */
3126 test_cmd_mock_domain_replace(self->stdev_id, s2_hwpt_id);
3127 /* then attach to pasid should succeed, */
3128 test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3129 /* but attach RID to non-pasid compat domain should fail now. */
3130 test_err_mock_domain_replace(EINVAL, self->stdev_id, parent_hwpt_id);
3131 /*
3132 * Detach hwpt from pasid 100, and check if the pasid 100
3133 * has null domain.
3134 */
3135 test_cmd_pasid_detach(pasid);
3136 ASSERT_EQ(0,
3137 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3138 pasid, 0));
3139	/* RID is attached to a pasid-compat domain; the pasid path is not used */
3140
3141 if (!variant->pasid_capable) {
3142 /*
3143 * PASID-compatible domain can be used by non-PASID-capable
3144 * device.
3145 */
3146 test_cmd_mock_domain_replace(self->no_pasid_stdev_id, nested_hwpt_id[0]);
3147 test_cmd_mock_domain_replace(self->no_pasid_stdev_id, self->ioas_id);
3148 /*
3149		 * Attaching a hwpt to pasid 100 of a non-PASID-capable device
3150		 * should fail, whether or not the domain is pasid-compat.
3151 */
3152 EXPECT_ERRNO(EINVAL,
3153 _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3154 pasid, parent_hwpt_id));
3155 EXPECT_ERRNO(EINVAL,
3156 _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3157 pasid, s2_hwpt_id));
3158 }
3159
3160 /*
3161	 * Attaching a non-pasid-compat hwpt to a pasid-capable device should
3162	 * fail and leave a null domain.
3163 */
3164 test_err_pasid_attach(EINVAL, pasid, parent_hwpt_id);
3165 ASSERT_EQ(0,
3166 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3167 pasid, 0));
3168
3169 /*
3170 * Attach ioas to pasid 100, should fail, domain should
3171 * be null.
3172 */
3173 test_err_pasid_attach(EINVAL, pasid, self->ioas_id);
3174 ASSERT_EQ(0,
3175 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3176 pasid, 0));
3177
3178 /*
3179 * Attach the s2_hwpt to pasid 100, should succeed, domain should
3180 * be valid.
3181 */
3182 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3183 ASSERT_EQ(0,
3184 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3185 pasid, s2_hwpt_id));
3186
3187 /*
3188	 * Trying to attach pasid 100 to another hwpt should FAIL since
3189	 * attach does not allow overwrite; use REPLACE instead.
3190 */
3191 test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3192
3193 /*
3194 * Detach hwpt from pasid 100 for next test, should succeed,
3195 * and have null domain.
3196 */
3197 test_cmd_pasid_detach(pasid);
3198 ASSERT_EQ(0,
3199 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3200 pasid, 0));
3201
3202 /*
3203 * Attach nested hwpt to pasid 100, should succeed, domain
3204 * should be valid.
3205 */
3206 test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3207 ASSERT_EQ(0,
3208 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3209 pasid, nested_hwpt_id[0]));
3210
3211 /* Attach to pasid 100 which has been attached, should fail. */
3212 test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3213
3214 /* cleanup pasid 100 */
3215 test_cmd_pasid_detach(pasid);
3216
3217 /* Replace tests */
3218
3219 pasid = 200;
3220 /*
3221 * Replace pasid 200 without attaching it, should fail
3222 * with -EINVAL.
3223 */
3224 test_err_pasid_replace(EINVAL, pasid, s2_hwpt_id);
3225
3226 /*
3227 * Attach the s2 hwpt to pasid 200, should succeed, domain should
3228 * be valid.
3229 */
3230 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3231 ASSERT_EQ(0,
3232 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3233 pasid, s2_hwpt_id));
3234
3235 /*
3236 * Replace pasid 200 with self->ioas_id, should fail
3237 * and domain should be the prior s2 hwpt.
3238 */
3239 test_err_pasid_replace(EINVAL, pasid, self->ioas_id);
3240 ASSERT_EQ(0,
3241 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3242 pasid, s2_hwpt_id));
3243
3244 /*
3245 * Replace a nested hwpt for pasid 200, should succeed,
3246 * and have valid domain.
3247 */
3248 test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);
3249 ASSERT_EQ(0,
3250 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3251 pasid, nested_hwpt_id[0]));
3252
3253 /*
3254 * Replace with another nested hwpt for pasid 200, should
3255 * succeed, and have valid domain.
3256 */
3257 test_cmd_pasid_replace(pasid, nested_hwpt_id[1]);
3258 ASSERT_EQ(0,
3259 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3260 pasid, nested_hwpt_id[1]));
3261
3262 /* cleanup pasid 200 */
3263 test_cmd_pasid_detach(pasid);
3264
3265 /* Negative Tests for pasid replace, use pasid 1024 */
3266
3267 /*
3268 * Attach the s2 hwpt to pasid 1024, should succeed, domain should
3269 * be valid.
3270 */
3271 pasid = 1024;
3272 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3273 ASSERT_EQ(0,
3274 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3275 pasid, s2_hwpt_id));
3276
3277 /*
3278 * Replace pasid 1024 with nested_hwpt_id[0], should fail,
3279	 * but keep the old valid domain. This is a deliberately injected
3280	 * failure case; normally this replace would succeed.
3281 */
3282 test_err_pasid_replace(ENOMEM, pasid, nested_hwpt_id[0]);
3283 ASSERT_EQ(0,
3284 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3285 pasid, s2_hwpt_id));
3286
3287 /* cleanup pasid 1024 */
3288 test_cmd_pasid_detach(pasid);
3289
3290 /* Attach to iopf-capable hwpt */
3291
3292 /*
3293 * Attach an iopf hwpt to pasid 2048, should succeed, domain should
3294 * be valid.
3295 */
3296 pasid = 2048;
3297 test_cmd_pasid_attach(pasid, iopf_hwpt_id);
3298 ASSERT_EQ(0,
3299 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3300 pasid, iopf_hwpt_id));
3301
3302 test_cmd_trigger_iopf_pasid(self->device_id, pasid, fault_fd);
3303
3304 /*
3305 * Replace with s2_hwpt_id for pasid 2048, should
3306 * succeed, and have valid domain.
3307 */
3308 test_cmd_pasid_replace(pasid, s2_hwpt_id);
3309 ASSERT_EQ(0,
3310 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3311 pasid, s2_hwpt_id));
3312
3313 /* cleanup pasid 2048 */
3314 test_cmd_pasid_detach(pasid);
3315
3316 test_ioctl_destroy(iopf_hwpt_id);
3317 close(fault_fd);
3318 test_ioctl_destroy(fault_id);
3319
3320 /* Detach the s2_hwpt_id from RID */
3321 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
3322 }
3323
3324 TEST_HARNESS_MAIN
3325