xref: /f-stack/dpdk/lib/librte_eal/linux/eal_vfio.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 #include <inttypes.h>
6 #include <string.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <sys/ioctl.h>
10 
11 #include <rte_errno.h>
12 #include <rte_log.h>
13 #include <rte_memory.h>
14 #include <rte_eal_memconfig.h>
15 #include <rte_vfio.h>
16 
17 #include "eal_filesystem.h"
18 #include "eal_memcfg.h"
19 #include "eal_vfio.h"
20 #include "eal_private.h"
21 #include "eal_internal_cfg.h"
22 
23 #ifdef VFIO_PRESENT
24 
25 #define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
26 
27 /* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
28  * recreate the mappings for DPDK segments, but we cannot do so for memory that
29  * was registered by the user themselves, so we need to store the user mappings
30  * somewhere, to recreate them later.
31  */
32 #define VFIO_MAX_USER_MEM_MAPS 256
33 struct user_mem_map {
34 	uint64_t addr;
35 	uint64_t iova;
36 	uint64_t len;
37 };
38 
39 struct user_mem_maps {
40 	rte_spinlock_recursive_t lock;
41 	int n_maps;
42 	struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
43 };
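
/*
 * Illustrative sketch: entries in the user_mem_maps table above normally
 * come from the public rte_vfio_container_dma_map() API (which lands in
 * container_dma_map() further down). Assuming an externally allocated
 * buffer "va" of "len" bytes, a caller might do roughly:
 *
 *	rte_iova_t iova = rte_mem_virt2iova(va);
 *	if (rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
 *			(uint64_t)(uintptr_t)va, iova, len) < 0)
 *		handle_error();	// placeholder error handling
 *
 * The mapping is stored here so it can be replayed if the container's
 * groups are dropped and re-attached (e.g. on hotplug).
 */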
44 
45 struct vfio_config {
46 	int vfio_enabled;
47 	int vfio_container_fd;
48 	int vfio_active_groups;
49 	const struct vfio_iommu_type *vfio_iommu_type;
50 	struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
51 	struct user_mem_maps mem_maps;
52 };
53 
54 /* per-process VFIO config */
55 static struct vfio_config vfio_cfgs[VFIO_MAX_CONTAINERS];
56 static struct vfio_config *default_vfio_cfg = &vfio_cfgs[0];
57 
58 static int vfio_type1_dma_map(int);
59 static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
60 static int vfio_spapr_dma_map(int);
61 static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
62 static int vfio_noiommu_dma_map(int);
63 static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
64 static int vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr,
65 		uint64_t iova, uint64_t len, int do_map);
66 
67 /* IOMMU types we support */
68 static const struct vfio_iommu_type iommu_types[] = {
69 	/* x86 IOMMU, otherwise known as type 1 */
70 	{
71 		.type_id = RTE_VFIO_TYPE1,
72 		.name = "Type 1",
73 		.dma_map_func = &vfio_type1_dma_map,
74 		.dma_user_map_func = &vfio_type1_dma_mem_map
75 	},
76 	/* ppc64 IOMMU, otherwise known as spapr */
77 	{
78 		.type_id = RTE_VFIO_SPAPR,
79 		.name = "sPAPR",
80 		.dma_map_func = &vfio_spapr_dma_map,
81 		.dma_user_map_func = &vfio_spapr_dma_mem_map
82 	},
83 	/* IOMMU-less mode */
84 	{
85 		.type_id = RTE_VFIO_NOIOMMU,
86 		.name = "No-IOMMU",
87 		.dma_map_func = &vfio_noiommu_dma_map,
88 		.dma_user_map_func = &vfio_noiommu_dma_mem_map
89 	},
90 };
91 
92 static int
93 is_null_map(const struct user_mem_map *map)
94 {
95 	return map->addr == 0 && map->iova == 0 && map->len == 0;
96 }
97 
98 /* we may need to merge user mem maps together in case of user mapping/unmapping
99  * chunks of memory, so we'll need a comparator function to sort segments.
100  */
101 static int
102 user_mem_map_cmp(const void *a, const void *b)
103 {
104 	const struct user_mem_map *umm_a = a;
105 	const struct user_mem_map *umm_b = b;
106 
107 	/* move null entries to end */
108 	if (is_null_map(umm_a))
109 		return 1;
110 	if (is_null_map(umm_b))
111 		return -1;
112 
113 	/* sort by iova first */
114 	if (umm_a->iova < umm_b->iova)
115 		return -1;
116 	if (umm_a->iova > umm_b->iova)
117 		return 1;
118 
119 	if (umm_a->addr < umm_b->addr)
120 		return -1;
121 	if (umm_a->addr > umm_b->addr)
122 		return 1;
123 
124 	if (umm_a->len < umm_b->len)
125 		return -1;
126 	if (umm_a->len > umm_b->len)
127 		return 1;
128 
129 	return 0;
130 }
131 
132 /* adjust user map entry. this may result in shortening of existing map, or in
133  * splitting existing map in two pieces.
134  */
135 static void
136 adjust_map(struct user_mem_map *src, struct user_mem_map *end,
137 		uint64_t remove_va_start, uint64_t remove_len)
138 {
139 	/* if va start is same as start address, we're simply moving start */
140 	if (remove_va_start == src->addr) {
141 		src->addr += remove_len;
142 		src->iova += remove_len;
143 		src->len -= remove_len;
144 	} else if (remove_va_start + remove_len == src->addr + src->len) {
145 		/* we're shrinking mapping from the end */
146 		src->len -= remove_len;
147 	} else {
148 		/* we're blowing a hole in the middle */
149 		struct user_mem_map tmp;
150 		uint64_t total_len = src->len;
151 
152 		/* adjust source segment length */
153 		src->len = remove_va_start - src->addr;
154 
155 		/* create temporary segment in the middle */
156 		tmp.addr = src->addr + src->len;
157 		tmp.iova = src->iova + src->len;
158 		tmp.len = remove_len;
159 
160 		/* populate end segment - this one we will be keeping */
161 		end->addr = tmp.addr + tmp.len;
162 		end->iova = tmp.iova + tmp.len;
163 		end->len = total_len - src->len - tmp.len;
164 	}
165 }
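
/*
 * Worked example of adjust_map() above: removing [0x3000, 0x4000) from a
 * map with addr=0x1000, iova=0x9000, len=0x5000 takes the last branch and
 * leaves two pieces:
 *
 *	src = { .addr = 0x1000, .iova = 0x9000, .len = 0x2000 }
 *	end = { .addr = 0x4000, .iova = 0xc000, .len = 0x2000 }
 *
 * while the removed middle chunk (addr 0x3000, iova 0xb000, len 0x1000)
 * is not kept.
 */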
166 
167 /* try merging two maps into one, return 1 if succeeded */
168 static int
169 merge_map(struct user_mem_map *left, struct user_mem_map *right)
170 {
171 	if (left->addr + left->len != right->addr)
172 		return 0;
173 	if (left->iova + left->len != right->iova)
174 		return 0;
175 
176 	left->len += right->len;
177 
178 	memset(right, 0, sizeof(*right));
179 
180 	return 1;
181 }
182 
183 static struct user_mem_map *
184 find_user_mem_map(struct user_mem_maps *user_mem_maps, uint64_t addr,
185 		uint64_t iova, uint64_t len)
186 {
187 	uint64_t va_end = addr + len;
188 	uint64_t iova_end = iova + len;
189 	int i;
190 
191 	for (i = 0; i < user_mem_maps->n_maps; i++) {
192 		struct user_mem_map *map = &user_mem_maps->maps[i];
193 		uint64_t map_va_end = map->addr + map->len;
194 		uint64_t map_iova_end = map->iova + map->len;
195 
196 		/* check start VA */
197 		if (addr < map->addr || addr >= map_va_end)
198 			continue;
199 		/* check if VA end is within boundaries */
200 		if (va_end <= map->addr || va_end > map_va_end)
201 			continue;
202 
203 		/* check start IOVA */
204 		if (iova < map->iova || iova >= map_iova_end)
205 			continue;
206 		/* check if IOVA end is within boundaries */
207 		if (iova_end <= map->iova || iova_end > map_iova_end)
208 			continue;
209 
210 		/* we've found our map */
211 		return map;
212 	}
213 	return NULL;
214 }
215 
216 /* this will sort all user maps, and merge/compact any adjacent maps */
217 static void
218 compact_user_maps(struct user_mem_maps *user_mem_maps)
219 {
220 	int i, n_merged, cur_idx;
221 
222 	qsort(user_mem_maps->maps, user_mem_maps->n_maps,
223 			sizeof(user_mem_maps->maps[0]), user_mem_map_cmp);
224 
225 	/* we'll go over the list backwards when merging */
226 	n_merged = 0;
227 	for (i = user_mem_maps->n_maps - 2; i >= 0; i--) {
228 		struct user_mem_map *l, *r;
229 
230 		l = &user_mem_maps->maps[i];
231 		r = &user_mem_maps->maps[i + 1];
232 
233 		if (is_null_map(l) || is_null_map(r))
234 			continue;
235 
236 		if (merge_map(l, r))
237 			n_merged++;
238 	}
239 
240 	/* the entries are still sorted, but now they have holes in them, so
241 	 * walk through the list and remove the holes
242 	 */
243 	if (n_merged > 0) {
244 		cur_idx = 0;
245 		for (i = 0; i < user_mem_maps->n_maps; i++) {
246 			if (!is_null_map(&user_mem_maps->maps[i])) {
247 				struct user_mem_map *src, *dst;
248 
249 				src = &user_mem_maps->maps[i];
250 				dst = &user_mem_maps->maps[cur_idx++];
251 
252 				if (src != dst) {
253 					memcpy(dst, src, sizeof(*src));
254 					memset(src, 0, sizeof(*src));
255 				}
256 			}
257 		}
258 		user_mem_maps->n_maps = cur_idx;
259 	}
260 }
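
/*
 * Example of the merge/compact behaviour above: two adjacent entries
 * { .addr = 0x1000, .iova = 0x8000, .len = 0x1000 } and
 * { .addr = 0x2000, .iova = 0x9000, .len = 0x1000 } end up next to each
 * other after the sort, merge_map() collapses them into a single
 * { 0x1000, 0x8000, 0x2000 } entry, and the compaction pass removes the
 * zeroed-out hole so n_maps drops by one.
 */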
261 
262 static int
263 vfio_open_group_fd(int iommu_group_num)
264 {
265 	int vfio_group_fd;
266 	char filename[PATH_MAX];
267 	struct rte_mp_msg mp_req, *mp_rep;
268 	struct rte_mp_reply mp_reply = {0};
269 	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
270 	struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
271 	const struct internal_config *internal_conf =
272 		eal_get_internal_configuration();
273 
274 	/* if primary, try to open the group */
275 	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
276 		/* try regular group format */
277 		snprintf(filename, sizeof(filename),
278 				 VFIO_GROUP_FMT, iommu_group_num);
279 		vfio_group_fd = open(filename, O_RDWR);
280 		if (vfio_group_fd < 0) {
281 			/* if file not found, it's not an error */
282 			if (errno != ENOENT) {
283 				RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
284 						strerror(errno));
285 				return -1;
286 			}
287 
288 			/* special case: try no-IOMMU path as well */
289 			snprintf(filename, sizeof(filename),
290 					VFIO_NOIOMMU_GROUP_FMT,
291 					iommu_group_num);
292 			vfio_group_fd = open(filename, O_RDWR);
293 			if (vfio_group_fd < 0) {
294 				if (errno != ENOENT) {
295 					RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
296 							strerror(errno));
297 					return -1;
298 				}
299 				return -ENOENT;
300 			}
301 			/* noiommu group found */
302 		}
303 
304 		return vfio_group_fd;
305 	}
306 	/* if we're in a secondary process, request group fd from the primary
307 	 * process via mp channel.
308 	 */
309 	p->req = SOCKET_REQ_GROUP;
310 	p->group_num = iommu_group_num;
311 	strcpy(mp_req.name, EAL_VFIO_MP);
312 	mp_req.len_param = sizeof(*p);
313 	mp_req.num_fds = 0;
314 
315 	vfio_group_fd = -1;
316 	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
317 	    mp_reply.nb_received == 1) {
318 		mp_rep = &mp_reply.msgs[0];
319 		p = (struct vfio_mp_param *)mp_rep->param;
320 		if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
321 			vfio_group_fd = mp_rep->fds[0];
322 		} else if (p->result == SOCKET_NO_FD) {
323 			RTE_LOG(ERR, EAL, "  bad VFIO group fd\n");
324 			vfio_group_fd = -ENOENT;
325 		}
326 	}
327 
328 	free(mp_reply.msgs);
329 	if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
330 		RTE_LOG(ERR, EAL, "  cannot request group fd\n");
331 	return vfio_group_fd;
332 }
333 
334 static struct vfio_config *
335 get_vfio_cfg_by_group_num(int iommu_group_num)
336 {
337 	struct vfio_config *vfio_cfg;
338 	int i, j;
339 
340 	for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
341 		vfio_cfg = &vfio_cfgs[i];
342 		for (j = 0; j < VFIO_MAX_GROUPS; j++) {
343 			if (vfio_cfg->vfio_groups[j].group_num ==
344 					iommu_group_num)
345 				return vfio_cfg;
346 		}
347 	}
348 
349 	return NULL;
350 }
351 
352 static int
353 vfio_get_group_fd(struct vfio_config *vfio_cfg,
354 		int iommu_group_num)
355 {
356 	int i;
357 	int vfio_group_fd;
358 	struct vfio_group *cur_grp;
359 
360 	/* check if we already have the group descriptor open */
361 	for (i = 0; i < VFIO_MAX_GROUPS; i++)
362 		if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num)
363 			return vfio_cfg->vfio_groups[i].fd;
364 
365 	/* Let's first see if there is room for a new group */
366 	if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
367 		RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
368 		return -1;
369 	}
370 
371 	/* Now let's get an index for the new group */
372 	for (i = 0; i < VFIO_MAX_GROUPS; i++)
373 		if (vfio_cfg->vfio_groups[i].group_num == -1) {
374 			cur_grp = &vfio_cfg->vfio_groups[i];
375 			break;
376 		}
377 
378 	/* This should not happen */
379 	if (i == VFIO_MAX_GROUPS) {
380 		RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
381 		return -1;
382 	}
383 
384 	vfio_group_fd = vfio_open_group_fd(iommu_group_num);
385 	if (vfio_group_fd < 0) {
386 		RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
387 		return vfio_group_fd;
388 	}
389 
390 	cur_grp->group_num = iommu_group_num;
391 	cur_grp->fd = vfio_group_fd;
392 	vfio_cfg->vfio_active_groups++;
393 
394 	return vfio_group_fd;
395 }
396 
397 static struct vfio_config *
398 get_vfio_cfg_by_group_fd(int vfio_group_fd)
399 {
400 	struct vfio_config *vfio_cfg;
401 	int i, j;
402 
403 	for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
404 		vfio_cfg = &vfio_cfgs[i];
405 		for (j = 0; j < VFIO_MAX_GROUPS; j++)
406 			if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
407 				return vfio_cfg;
408 	}
409 
410 	return NULL;
411 }
412 
413 static struct vfio_config *
414 get_vfio_cfg_by_container_fd(int container_fd)
415 {
416 	int i;
417 
418 	if (container_fd == RTE_VFIO_DEFAULT_CONTAINER_FD)
419 		return default_vfio_cfg;
420 
421 	for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
422 		if (vfio_cfgs[i].vfio_container_fd == container_fd)
423 			return &vfio_cfgs[i];
424 	}
425 
426 	return NULL;
427 }
428 
429 int
430 rte_vfio_get_group_fd(int iommu_group_num)
431 {
432 	struct vfio_config *vfio_cfg;
433 
434 	/* get the vfio_config it belongs to */
435 	vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
436 	vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
437 
438 	return vfio_get_group_fd(vfio_cfg, iommu_group_num);
439 }
440 
441 static int
442 get_vfio_group_idx(int vfio_group_fd)
443 {
444 	struct vfio_config *vfio_cfg;
445 	int i, j;
446 
447 	for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
448 		vfio_cfg = &vfio_cfgs[i];
449 		for (j = 0; j < VFIO_MAX_GROUPS; j++)
450 			if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
451 				return j;
452 	}
453 
454 	return -1;
455 }
456 
457 static void
458 vfio_group_device_get(int vfio_group_fd)
459 {
460 	struct vfio_config *vfio_cfg;
461 	int i;
462 
463 	vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
464 	if (vfio_cfg == NULL) {
465 		RTE_LOG(ERR, EAL, "  invalid group fd!\n");
466 		return;
467 	}
468 
469 	i = get_vfio_group_idx(vfio_group_fd);
470 	if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
471 		RTE_LOG(ERR, EAL, "  wrong vfio_group index (%d)\n", i);
472 	else
473 		vfio_cfg->vfio_groups[i].devices++;
474 }
475 
476 static void
477 vfio_group_device_put(int vfio_group_fd)
478 {
479 	struct vfio_config *vfio_cfg;
480 	int i;
481 
482 	vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
483 	if (vfio_cfg == NULL) {
484 		RTE_LOG(ERR, EAL, "  invalid group fd!\n");
485 		return;
486 	}
487 
488 	i = get_vfio_group_idx(vfio_group_fd);
489 	if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
490 		RTE_LOG(ERR, EAL, "  wrong vfio_group index (%d)\n", i);
491 	else
492 		vfio_cfg->vfio_groups[i].devices--;
493 }
494 
495 static int
496 vfio_group_device_count(int vfio_group_fd)
497 {
498 	struct vfio_config *vfio_cfg;
499 	int i;
500 
501 	vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
502 	if (vfio_cfg == NULL) {
503 		RTE_LOG(ERR, EAL, "  invalid group fd!\n");
504 		return -1;
505 	}
506 
507 	i = get_vfio_group_idx(vfio_group_fd);
508 	if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
509 		RTE_LOG(ERR, EAL, "  wrong vfio_group index (%d)\n", i);
510 		return -1;
511 	}
512 
513 	return vfio_cfg->vfio_groups[i].devices;
514 }
515 
516 static void
517 vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
518 		void *arg __rte_unused)
519 {
520 	rte_iova_t iova_start, iova_expected;
521 	struct rte_memseg_list *msl;
522 	struct rte_memseg *ms;
523 	size_t cur_len = 0;
524 	uint64_t va_start;
525 
526 	msl = rte_mem_virt2memseg_list(addr);
527 
528 	/* for IOVA as VA mode, no need to care for IOVA addresses */
529 	if (rte_eal_iova_mode() == RTE_IOVA_VA && msl->external == 0) {
530 		uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
531 		if (type == RTE_MEM_EVENT_ALLOC)
532 			vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
533 					len, 1);
534 		else
535 			vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
536 					len, 0);
537 		return;
538 	}
539 
540 	/* memseg structures are stored contiguously, so we can iterate over them */
541 	ms = rte_mem_virt2memseg(addr, msl);
542 
543 	/*
544 	 * This memory is not guaranteed to be contiguous, but it still could
545 	 * be, or it could have some small contiguous chunks. Since the number
546 	 * of VFIO mappings is limited, and VFIO appears to not concatenate
547 	 * adjacent mappings, we have to do this ourselves.
548 	 *
549 	 * So, find contiguous chunks, then map them.
550 	 */
551 	va_start = ms->addr_64;
552 	iova_start = iova_expected = ms->iova;
553 	while (cur_len < len) {
554 		bool new_contig_area = ms->iova != iova_expected;
555 		bool last_seg = (len - cur_len) == ms->len;
556 		bool skip_last = false;
557 
558 		/* only do mappings when current contiguous area ends */
559 		if (new_contig_area) {
560 			if (type == RTE_MEM_EVENT_ALLOC)
561 				vfio_dma_mem_map(default_vfio_cfg, va_start,
562 						iova_start,
563 						iova_expected - iova_start, 1);
564 			else
565 				vfio_dma_mem_map(default_vfio_cfg, va_start,
566 						iova_start,
567 						iova_expected - iova_start, 0);
568 			va_start = ms->addr_64;
569 			iova_start = ms->iova;
570 		}
571 		/* some memory segments may have invalid IOVA */
572 		if (ms->iova == RTE_BAD_IOVA) {
573 			RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n",
574 					ms->addr);
575 			skip_last = true;
576 		}
577 		iova_expected = ms->iova + ms->len;
578 		cur_len += ms->len;
579 		++ms;
580 
581 		/*
582 		 * don't count previous segment, and don't attempt to
583 		 * dereference a potentially invalid pointer.
584 		 */
585 		if (skip_last && !last_seg) {
586 			iova_expected = iova_start = ms->iova;
587 			va_start = ms->addr_64;
588 		} else if (!skip_last && last_seg) {
589 			/* this is the last segment and we're not skipping */
590 			if (type == RTE_MEM_EVENT_ALLOC)
591 				vfio_dma_mem_map(default_vfio_cfg, va_start,
592 						iova_start,
593 						iova_expected - iova_start, 1);
594 			else
595 				vfio_dma_mem_map(default_vfio_cfg, va_start,
596 						iova_start,
597 						iova_expected - iova_start, 0);
598 		}
599 	}
600 }
601 
602 static int
603 vfio_sync_default_container(void)
604 {
605 	struct rte_mp_msg mp_req, *mp_rep;
606 	struct rte_mp_reply mp_reply = {0};
607 	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
608 	struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
609 	int iommu_type_id;
610 	unsigned int i;
611 
612 	/* cannot be called from primary */
613 	if (rte_eal_process_type() != RTE_PROC_SECONDARY)
614 		return -1;
615 
616 	/* default container fd should have been opened in rte_vfio_enable() */
617 	if (!default_vfio_cfg->vfio_enabled ||
618 			default_vfio_cfg->vfio_container_fd < 0) {
619 		RTE_LOG(ERR, EAL, "VFIO support is not initialized\n");
620 		return -1;
621 	}
622 
623 	/* find default container's IOMMU type */
624 	p->req = SOCKET_REQ_IOMMU_TYPE;
625 	strcpy(mp_req.name, EAL_VFIO_MP);
626 	mp_req.len_param = sizeof(*p);
627 	mp_req.num_fds = 0;
628 
629 	iommu_type_id = -1;
630 	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
631 			mp_reply.nb_received == 1) {
632 		mp_rep = &mp_reply.msgs[0];
633 		p = (struct vfio_mp_param *)mp_rep->param;
634 		if (p->result == SOCKET_OK)
635 			iommu_type_id = p->iommu_type_id;
636 	}
637 	free(mp_reply.msgs);
638 	if (iommu_type_id < 0) {
639 		RTE_LOG(ERR, EAL, "Could not get IOMMU type for default container\n");
640 		return -1;
641 	}
642 
643 	/* we now have an fd for default container, as well as its IOMMU type.
644 	 * now, set up default VFIO container config to match.
645 	 */
646 	for (i = 0; i < RTE_DIM(iommu_types); i++) {
647 		const struct vfio_iommu_type *t = &iommu_types[i];
648 		if (t->type_id != iommu_type_id)
649 			continue;
650 
651 		/* we found our IOMMU type */
652 		default_vfio_cfg->vfio_iommu_type = t;
653 
654 		return 0;
655 	}
656 	RTE_LOG(ERR, EAL, "Could not find IOMMU type id (%i)\n",
657 			iommu_type_id);
658 	return -1;
659 }
660 
661 int
662 rte_vfio_clear_group(int vfio_group_fd)
663 {
664 	int i;
665 	struct vfio_config *vfio_cfg;
666 
667 	vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
668 	if (vfio_cfg == NULL) {
669 		RTE_LOG(ERR, EAL, "  invalid group fd!\n");
670 		return -1;
671 	}
672 
673 	i = get_vfio_group_idx(vfio_group_fd);
674 	if (i < 0)
675 		return -1;
676 	vfio_cfg->vfio_groups[i].group_num = -1;
677 	vfio_cfg->vfio_groups[i].fd = -1;
678 	vfio_cfg->vfio_groups[i].devices = 0;
679 	vfio_cfg->vfio_active_groups--;
680 
681 	return 0;
682 }
683 
684 int
685 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
686 		int *vfio_dev_fd, struct vfio_device_info *device_info)
687 {
688 	struct vfio_group_status group_status = {
689 			.argsz = sizeof(group_status)
690 	};
691 	struct vfio_config *vfio_cfg;
692 	struct user_mem_maps *user_mem_maps;
693 	int vfio_container_fd;
694 	int vfio_group_fd;
695 	int iommu_group_num;
696 	rte_uuid_t vf_token;
697 	int i, ret;
698 	const struct internal_config *internal_conf =
699 		eal_get_internal_configuration();
700 
701 	/* get group number */
702 	ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
703 	if (ret == 0) {
704 		RTE_LOG(WARNING, EAL, "  %s not managed by VFIO driver, skipping\n",
705 			dev_addr);
706 		return 1;
707 	}
708 
709 	/* if negative, something failed */
710 	if (ret < 0)
711 		return -1;
712 
713 	/* get the actual group fd */
714 	vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
715 	if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
716 		return -1;
717 
718 	/*
719 	 * if vfio_group_fd == -ENOENT, that means the device
720 	 * isn't managed by VFIO
721 	 */
722 	if (vfio_group_fd == -ENOENT) {
723 		RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
724 				dev_addr);
725 		return 1;
726 	}
727 
728 	/*
729 	 * at this point, we have an open group fd, which means the device is
730 	 * bound to VFIO; we still need to check that the group itself is viable
731 	 */
732 
733 	/* check if the group is viable */
734 	ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
735 	if (ret) {
736 		RTE_LOG(ERR, EAL, "  %s cannot get group status, "
737 				"error %i (%s)\n", dev_addr, errno, strerror(errno));
738 		close(vfio_group_fd);
739 		rte_vfio_clear_group(vfio_group_fd);
740 		return -1;
741 	} else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
742 		RTE_LOG(ERR, EAL, "  %s VFIO group is not viable! "
743 				"Not all devices in IOMMU group bound to VFIO or unbound\n",
744 				dev_addr);
745 		close(vfio_group_fd);
746 		rte_vfio_clear_group(vfio_group_fd);
747 		return -1;
748 	}
749 
750 	/* get the vfio_config it belongs to */
751 	vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
752 	vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
753 	vfio_container_fd = vfio_cfg->vfio_container_fd;
754 	user_mem_maps = &vfio_cfg->mem_maps;
755 
756 	/* check if group does not have a container yet */
757 	if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
758 
759 		/* add group to a container */
760 		ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
761 				&vfio_container_fd);
762 		if (ret) {
763 			RTE_LOG(ERR, EAL, "  %s cannot add VFIO group to container, "
764 					"error %i (%s)\n", dev_addr, errno, strerror(errno));
765 			close(vfio_group_fd);
766 			rte_vfio_clear_group(vfio_group_fd);
767 			return -1;
768 		}
769 
770 		/*
771 		 * pick an IOMMU type and set up DMA mappings for container
772 		 *
773 		 * needs to be done only once, only when first group is
774 		 * assigned to a container and only in primary process.
775 		 * Note this can happen several times with the hotplug
776 		 * functionality.
777 		 */
778 		if (internal_conf->process_type == RTE_PROC_PRIMARY &&
779 				vfio_cfg->vfio_active_groups == 1 &&
780 				vfio_group_device_count(vfio_group_fd) == 0) {
781 			const struct vfio_iommu_type *t;
782 
783 			/* select an IOMMU type which we will be using */
784 			t = vfio_set_iommu_type(vfio_container_fd);
785 			if (!t) {
786 				RTE_LOG(ERR, EAL,
787 					"  %s failed to select IOMMU type\n",
788 					dev_addr);
789 				close(vfio_group_fd);
790 				rte_vfio_clear_group(vfio_group_fd);
791 				return -1;
792 			}
793 			/* lock memory hotplug before mapping and release it
794 			 * after registering callback, to prevent races
795 			 */
796 			rte_mcfg_mem_read_lock();
797 			if (vfio_cfg == default_vfio_cfg)
798 				ret = t->dma_map_func(vfio_container_fd);
799 			else
800 				ret = 0;
801 			if (ret) {
802 				RTE_LOG(ERR, EAL,
803 					"  %s DMA remapping failed, error %i (%s)\n",
804 					dev_addr, errno, strerror(errno));
805 				close(vfio_group_fd);
806 				rte_vfio_clear_group(vfio_group_fd);
807 				rte_mcfg_mem_read_unlock();
808 				return -1;
809 			}
810 
811 			vfio_cfg->vfio_iommu_type = t;
812 
813 			/* re-map all user-mapped segments */
814 			rte_spinlock_recursive_lock(&user_mem_maps->lock);
815 
816 			/* this IOMMU type may not support DMA mapping, but
817 			 * if we have mappings in the list - that means we have
818 			 * previously mapped something successfully, so we can
819 			 * be sure that DMA mapping is supported.
820 			 */
821 			for (i = 0; i < user_mem_maps->n_maps; i++) {
822 				struct user_mem_map *map;
823 				map = &user_mem_maps->maps[i];
824 
825 				ret = t->dma_user_map_func(
826 						vfio_container_fd,
827 						map->addr, map->iova, map->len,
828 						1);
829 				if (ret) {
830 					RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
831 							"va: 0x%" PRIx64 " "
832 							"iova: 0x%" PRIx64 " "
833 							"len: 0x%" PRIx64 "\n",
834 							map->addr, map->iova,
835 							map->len);
836 					rte_spinlock_recursive_unlock(
837 							&user_mem_maps->lock);
838 					rte_mcfg_mem_read_unlock();
839 					return -1;
840 				}
841 			}
842 			rte_spinlock_recursive_unlock(&user_mem_maps->lock);
843 
844 			/* register callback for mem events */
845 			if (vfio_cfg == default_vfio_cfg)
846 				ret = rte_mem_event_callback_register(
847 					VFIO_MEM_EVENT_CLB_NAME,
848 					vfio_mem_event_callback, NULL);
849 			else
850 				ret = 0;
851 			/* unlock memory hotplug */
852 			rte_mcfg_mem_read_unlock();
853 
854 			if (ret && rte_errno != ENOTSUP) {
855 				RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
856 				return -1;
857 			}
858 			if (ret)
859 				RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
860 			else
861 				RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
862 		}
863 	} else if (rte_eal_process_type() != RTE_PROC_PRIMARY &&
864 			vfio_cfg == default_vfio_cfg &&
865 			vfio_cfg->vfio_iommu_type == NULL) {
866 		/* if we're not a primary process, we do not set up the VFIO
867 		 * container because it's already been set up by the primary
868 		 * process. instead, we simply ask the primary about VFIO type
869 		 * we are using, and set the VFIO config up appropriately.
870 		 */
871 		ret = vfio_sync_default_container();
872 		if (ret < 0) {
873 			RTE_LOG(ERR, EAL, "Could not sync default VFIO container\n");
874 			close(vfio_group_fd);
875 			rte_vfio_clear_group(vfio_group_fd);
876 			return -1;
877 		}
878 		/* we have successfully initialized VFIO, notify user */
879 		const struct vfio_iommu_type *t =
880 				default_vfio_cfg->vfio_iommu_type;
881 		RTE_LOG(INFO, EAL, "  using IOMMU type %d (%s)\n",
882 				t->type_id, t->name);
883 	}
884 
885 	rte_eal_vfio_get_vf_token(vf_token);
886 
887 	/* first, try to get a device file descriptor using the VF token */
888 	if (!rte_uuid_is_null(vf_token)) {
889 		char vf_token_str[RTE_UUID_STRLEN];
890 		char dev[PATH_MAX];
891 
892 		rte_uuid_unparse(vf_token, vf_token_str, sizeof(vf_token_str));
893 		snprintf(dev, sizeof(dev),
894 			 "%s vf_token=%s", dev_addr, vf_token_str);
895 
896 		*vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD,
897 				     dev);
898 		if (*vfio_dev_fd >= 0)
899 			goto dev_get_info;
900 	}
901 
902 	/* get a file descriptor for the device */
903 	*vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
904 	if (*vfio_dev_fd < 0) {
905 		/* if we cannot get a device fd, this implies a problem with
906 		 * the VFIO group or the container not having IOMMU configured.
907 		 */
908 
909 		RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
910 				dev_addr);
911 		close(vfio_group_fd);
912 		rte_vfio_clear_group(vfio_group_fd);
913 		return -1;
914 	}
915 
916 	/* test and setup the device */
917 dev_get_info:
918 	ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
919 	if (ret) {
920 		RTE_LOG(ERR, EAL, "  %s cannot get device info, "
921 				"error %i (%s)\n", dev_addr, errno,
922 				strerror(errno));
923 		close(*vfio_dev_fd);
924 		close(vfio_group_fd);
925 		rte_vfio_clear_group(vfio_group_fd);
926 		return -1;
927 	}
928 	vfio_group_device_get(vfio_group_fd);
929 
930 	return 0;
931 }
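
/*
 * Typical usage of rte_vfio_setup_device() above from a bus driver
 * (illustrative sketch; the PCI address and error handling below are
 * placeholders, not taken from this file):
 *
 *	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
 *	int dev_fd;
 *	int ret = rte_vfio_setup_device("/sys/bus/pci/devices", "0000:81:00.0",
 *			&dev_fd, &device_info);
 *	if (ret > 0)
 *		...;	// device not managed by VFIO, skip it
 *	else if (ret < 0)
 *		...;	// setup failed
 *	// ret == 0: dev_fd is valid and device_info is filled in; region and
 *	// IRQ details can then be queried with the VFIO_DEVICE_GET_REGION_INFO
 *	// and VFIO_DEVICE_GET_IRQ_INFO ioctls on dev_fd.
 */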
932 
933 int
934 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
935 		    int vfio_dev_fd)
936 {
937 	struct vfio_config *vfio_cfg;
938 	int vfio_group_fd;
939 	int iommu_group_num;
940 	int ret;
941 
942 	/* we don't want any DMA mapping messages to come while we're detaching
943 	 * VFIO device, because this might be the last device and we might need
944 	 * to unregister the callback.
945 	 */
946 	rte_mcfg_mem_read_lock();
947 
948 	/* get group number */
949 	ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
950 	if (ret <= 0) {
951 		RTE_LOG(WARNING, EAL, "  %s not managed by VFIO driver\n",
952 			dev_addr);
953 		/* This is an error at this point. */
954 		ret = -1;
955 		goto out;
956 	}
957 
958 	/* get the actual group fd */
959 	vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
960 	if (vfio_group_fd < 0) {
961 		RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
962 				   dev_addr);
963 		ret = vfio_group_fd;
964 		goto out;
965 	}
966 
967 	/* get the vfio_config it belongs to */
968 	vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
969 	vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
970 
971 	/* At this point we have an active group. Closing it will detach it from
972 	 * the container. If this is the last active group, VFIO kernel
973 	 * code will unset the container and the IOMMU mappings.
974 	 */
975 
976 	/* Closing a device */
977 	if (close(vfio_dev_fd) < 0) {
978 		RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
979 				   dev_addr);
980 		ret = -1;
981 		goto out;
982 	}
983 
984 	/* A VFIO group can have several devices attached. Only when no devices
985 	 * remain should the group be closed.
986 	 */
987 	vfio_group_device_put(vfio_group_fd);
988 	if (!vfio_group_device_count(vfio_group_fd)) {
989 
990 		if (close(vfio_group_fd) < 0) {
991 			RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
992 				dev_addr);
993 			ret = -1;
994 			goto out;
995 		}
996 
997 		if (rte_vfio_clear_group(vfio_group_fd) < 0) {
998 			RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
999 					   dev_addr);
1000 			ret = -1;
1001 			goto out;
1002 		}
1003 	}
1004 
1005 	/* if there are no active device groups, unregister the callback to
1006 	 * avoid spurious attempts to map/unmap memory from VFIO.
1007 	 */
1008 	if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0 &&
1009 			rte_eal_process_type() != RTE_PROC_SECONDARY)
1010 		rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME,
1011 				NULL);
1012 
1013 	/* success */
1014 	ret = 0;
1015 
1016 out:
1017 	rte_mcfg_mem_read_unlock();
1018 	return ret;
1019 }
1020 
1021 int
1022 rte_vfio_enable(const char *modname)
1023 {
1024 	/* initialize group list */
1025 	int i, j;
1026 	int vfio_available;
1027 	const struct internal_config *internal_conf =
1028 		eal_get_internal_configuration();
1029 
1030 	rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
1031 
1032 	for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
1033 		vfio_cfgs[i].vfio_container_fd = -1;
1034 		vfio_cfgs[i].vfio_active_groups = 0;
1035 		vfio_cfgs[i].vfio_iommu_type = NULL;
1036 		vfio_cfgs[i].mem_maps.lock = lock;
1037 
1038 		for (j = 0; j < VFIO_MAX_GROUPS; j++) {
1039 			vfio_cfgs[i].vfio_groups[j].fd = -1;
1040 			vfio_cfgs[i].vfio_groups[j].group_num = -1;
1041 			vfio_cfgs[i].vfio_groups[j].devices = 0;
1042 		}
1043 	}
1044 
1045 	/* inform the user that we are probing for VFIO */
1046 	RTE_LOG(INFO, EAL, "Probing VFIO support...\n");
1047 
1048 	/* check if vfio module is loaded */
1049 	vfio_available = rte_eal_check_module(modname);
1050 
1051 	/* return error directly */
1052 	if (vfio_available == -1) {
1053 		RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
1054 		return -1;
1055 	}
1056 
1057 	/* return 0 if VFIO modules not loaded */
1058 	if (vfio_available == 0) {
1059 		RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
1060 			"skipping VFIO support...\n");
1061 		return 0;
1062 	}
1063 
1064 	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1065 		/* open a new container */
1066 		default_vfio_cfg->vfio_container_fd =
1067 				rte_vfio_get_container_fd();
1068 	} else {
1069 		/* get the default container from the primary process */
1070 		default_vfio_cfg->vfio_container_fd =
1071 				vfio_get_default_container_fd();
1072 	}
1073 
1074 	/* check if we have VFIO driver enabled */
1075 	if (default_vfio_cfg->vfio_container_fd != -1) {
1076 		RTE_LOG(INFO, EAL, "VFIO support initialized\n");
1077 		default_vfio_cfg->vfio_enabled = 1;
1078 	} else {
1079 		RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
1080 	}
1081 
1082 	return 0;
1083 }
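
/*
 * Note: rte_vfio_enable() above is normally invoked during EAL startup
 * (typically with "vfio" as the module name), so applications rarely call
 * it directly; rte_vfio_is_enabled() below can be used afterwards to check
 * whether VFIO support ended up usable.
 */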
1084 
1085 int
1086 rte_vfio_is_enabled(const char *modname)
1087 {
1088 	const int mod_available = rte_eal_check_module(modname) > 0;
1089 	return default_vfio_cfg->vfio_enabled && mod_available;
1090 }
1091 
1092 int
1093 vfio_get_default_container_fd(void)
1094 {
1095 	struct rte_mp_msg mp_req, *mp_rep;
1096 	struct rte_mp_reply mp_reply = {0};
1097 	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1098 	struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1099 	int container_fd;
1100 	const struct internal_config *internal_conf =
1101 		eal_get_internal_configuration();
1102 
1103 	if (default_vfio_cfg->vfio_enabled)
1104 		return default_vfio_cfg->vfio_container_fd;
1105 
1106 	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1107 		/* if we were a secondary process we would try requesting
1108 		 * container fd from the primary, but we're the primary
1109 		 * process so just exit here
1110 		 */
1111 		return -1;
1112 	}
1113 
1114 	p->req = SOCKET_REQ_DEFAULT_CONTAINER;
1115 	strcpy(mp_req.name, EAL_VFIO_MP);
1116 	mp_req.len_param = sizeof(*p);
1117 	mp_req.num_fds = 0;
1118 
1119 	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1120 	    mp_reply.nb_received == 1) {
1121 		mp_rep = &mp_reply.msgs[0];
1122 		p = (struct vfio_mp_param *)mp_rep->param;
1123 		if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1124 			container_fd = mp_rep->fds[0];
1125 			free(mp_reply.msgs);
1126 			return container_fd;
1127 		}
1128 	}
1129 
1130 	free(mp_reply.msgs);
1131 	RTE_LOG(ERR, EAL, "  cannot request default container fd\n");
1132 	return -1;
1133 }
1134 
1135 int
1136 vfio_get_iommu_type(void)
1137 {
1138 	if (default_vfio_cfg->vfio_iommu_type == NULL)
1139 		return -1;
1140 
1141 	return default_vfio_cfg->vfio_iommu_type->type_id;
1142 }
1143 
1144 const struct vfio_iommu_type *
1145 vfio_set_iommu_type(int vfio_container_fd)
1146 {
1147 	unsigned idx;
1148 	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1149 		const struct vfio_iommu_type *t = &iommu_types[idx];
1150 
1151 		int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
1152 				t->type_id);
1153 		if (!ret) {
1154 			RTE_LOG(INFO, EAL, "  using IOMMU type %d (%s)\n",
1155 					t->type_id, t->name);
1156 			return t;
1157 		}
1158 		/* not an error, there may be more supported IOMMU types */
1159 		RTE_LOG(DEBUG, EAL, "  set IOMMU type %d (%s) failed, "
1160 				"error %i (%s)\n", t->type_id, t->name, errno,
1161 				strerror(errno));
1162 	}
1163 	/* if we didn't find a suitable IOMMU type, fail */
1164 	return NULL;
1165 }
1166 
1167 int
1168 vfio_has_supported_extensions(int vfio_container_fd)
1169 {
1170 	int ret;
1171 	unsigned idx, n_extensions = 0;
1172 	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1173 		const struct vfio_iommu_type *t = &iommu_types[idx];
1174 
1175 		ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
1176 				t->type_id);
1177 		if (ret < 0) {
1178 			RTE_LOG(ERR, EAL, "  could not get IOMMU type, "
1179 				"error %i (%s)\n", errno,
1180 				strerror(errno));
1181 			close(vfio_container_fd);
1182 			return -1;
1183 		} else if (ret == 1) {
1184 			/* we found a supported extension */
1185 			n_extensions++;
1186 		}
1187 		RTE_LOG(DEBUG, EAL, "  IOMMU type %d (%s) is %s\n",
1188 				t->type_id, t->name,
1189 				ret ? "supported" : "not supported");
1190 	}
1191 
1192 	/* if we didn't find any supported IOMMU types, fail */
1193 	if (!n_extensions) {
1194 		close(vfio_container_fd);
1195 		return -1;
1196 	}
1197 
1198 	return 0;
1199 }
1200 
1201 int
1202 rte_vfio_get_container_fd(void)
1203 {
1204 	int ret, vfio_container_fd;
1205 	struct rte_mp_msg mp_req, *mp_rep;
1206 	struct rte_mp_reply mp_reply = {0};
1207 	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1208 	struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1209 	const struct internal_config *internal_conf =
1210 		eal_get_internal_configuration();
1211 
1212 
1213 	/* if we're in a primary process, try to open the container */
1214 	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1215 		vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
1216 		if (vfio_container_fd < 0) {
1217 			RTE_LOG(ERR, EAL, "  cannot open VFIO container, "
1218 					"error %i (%s)\n", errno, strerror(errno));
1219 			return -1;
1220 		}
1221 
1222 		/* check VFIO API version */
1223 		ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
1224 		if (ret != VFIO_API_VERSION) {
1225 			if (ret < 0)
1226 				RTE_LOG(ERR, EAL, "  could not get VFIO API version, "
1227 						"error %i (%s)\n", errno, strerror(errno));
1228 			else
1229 				RTE_LOG(ERR, EAL, "  unsupported VFIO API version!\n");
1230 			close(vfio_container_fd);
1231 			return -1;
1232 		}
1233 
1234 		ret = vfio_has_supported_extensions(vfio_container_fd);
1235 		if (ret) {
1236 			RTE_LOG(ERR, EAL, "  no supported IOMMU "
1237 					"extensions found!\n");
1238 			return -1;
1239 		}
1240 
1241 		return vfio_container_fd;
1242 	}
1243 	/*
1244 	 * if we're in a secondary process, request container fd from the
1245 	 * primary process via mp channel
1246 	 */
1247 	p->req = SOCKET_REQ_CONTAINER;
1248 	strcpy(mp_req.name, EAL_VFIO_MP);
1249 	mp_req.len_param = sizeof(*p);
1250 	mp_req.num_fds = 0;
1251 
1252 	vfio_container_fd = -1;
1253 	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1254 	    mp_reply.nb_received == 1) {
1255 		mp_rep = &mp_reply.msgs[0];
1256 		p = (struct vfio_mp_param *)mp_rep->param;
1257 		if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1258 			vfio_container_fd = mp_rep->fds[0];
1259 			free(mp_reply.msgs);
1260 			return vfio_container_fd;
1261 		}
1262 	}
1263 
1264 	free(mp_reply.msgs);
1265 	RTE_LOG(ERR, EAL, "  cannot request container fd\n");
1266 	return -1;
1267 }
1268 
1269 int
1270 rte_vfio_get_group_num(const char *sysfs_base,
1271 		const char *dev_addr, int *iommu_group_num)
1272 {
1273 	char linkname[PATH_MAX];
1274 	char filename[PATH_MAX];
1275 	char *tok[16], *group_tok, *end;
1276 	int ret;
1277 
1278 	memset(linkname, 0, sizeof(linkname));
1279 	memset(filename, 0, sizeof(filename));
1280 
1281 	/* try to find out IOMMU group for this device */
1282 	snprintf(linkname, sizeof(linkname),
1283 			 "%s/%s/iommu_group", sysfs_base, dev_addr);
1284 
1285 	ret = readlink(linkname, filename, sizeof(filename));
1286 
1287 	/* if the link doesn't exist, no VFIO for us */
1288 	if (ret < 0)
1289 		return 0;
1290 
1291 	ret = rte_strsplit(filename, sizeof(filename),
1292 			tok, RTE_DIM(tok), '/');
1293 
1294 	if (ret <= 0) {
1295 		RTE_LOG(ERR, EAL, "  %s cannot get IOMMU group\n", dev_addr);
1296 		return -1;
1297 	}
1298 
1299 	/* IOMMU group is always the last token */
1300 	errno = 0;
1301 	group_tok = tok[ret - 1];
1302 	end = group_tok;
1303 	*iommu_group_num = strtol(group_tok, &end, 10);
1304 	if ((end != group_tok && *end != '\0') || errno != 0) {
1305 		RTE_LOG(ERR, EAL, "  %s error parsing IOMMU number!\n", dev_addr);
1306 		return -1;
1307 	}
1308 
1309 	return 1;
1310 }
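
/*
 * Example: for a PCI device, the iommu_group symlink resolved above
 * typically looks something like
 *
 *	/sys/bus/pci/devices/0000:81:00.0/iommu_group
 *		-> ../../../../kernel/iommu_groups/42
 *
 * in which case the function returns 1 and *iommu_group_num is set to 42.
 * (The device address and the number of "../" components are illustrative.)
 */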
1311 
1312 static int
1313 type1_map_contig(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1314 		size_t len, void *arg)
1315 {
1316 	int *vfio_container_fd = arg;
1317 
1318 	if (msl->external)
1319 		return 0;
1320 
1321 	return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1322 			len, 1);
1323 }
1324 
1325 static int
1326 type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1327 		void *arg)
1328 {
1329 	int *vfio_container_fd = arg;
1330 
1331 	/* skip external memory that isn't a heap */
1332 	if (msl->external && !msl->heap)
1333 		return 0;
1334 
1335 	/* skip any segments with invalid IOVA addresses */
1336 	if (ms->iova == RTE_BAD_IOVA)
1337 		return 0;
1338 
1339 	/* if IOVA mode is VA, we've already mapped the internal segments */
1340 	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
1341 		return 0;
1342 
1343 	return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1344 			ms->len, 1);
1345 }
1346 
1347 static int
1348 vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1349 		uint64_t len, int do_map)
1350 {
1351 	struct vfio_iommu_type1_dma_map dma_map;
1352 	struct vfio_iommu_type1_dma_unmap dma_unmap;
1353 	int ret;
1354 
1355 	if (do_map != 0) {
1356 		memset(&dma_map, 0, sizeof(dma_map));
1357 		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1358 		dma_map.vaddr = vaddr;
1359 		dma_map.size = len;
1360 		dma_map.iova = iova;
1361 		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1362 				VFIO_DMA_MAP_FLAG_WRITE;
1363 
1364 		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1365 		if (ret) {
1366 			/**
1367 			 * In case the mapping was already done EEXIST will be
1368 			 * returned from kernel.
1369 			 */
1370 			if (errno == EEXIST) {
1371 				RTE_LOG(DEBUG, EAL,
1372 					" Memory segment is already mapped,"
1373 					" skipping\n");
1374 			} else {
1375 				RTE_LOG(ERR, EAL,
1376 					"  cannot set up DMA remapping,"
1377 					" error %i (%s)\n",
1378 					errno, strerror(errno));
1379 				return -1;
1380 			}
1381 		}
1382 	} else {
1383 		memset(&dma_unmap, 0, sizeof(dma_unmap));
1384 		dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1385 		dma_unmap.size = len;
1386 		dma_unmap.iova = iova;
1387 
1388 		ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1389 				&dma_unmap);
1390 		if (ret) {
1391 			RTE_LOG(ERR, EAL, "  cannot clear DMA remapping, error %i (%s)\n",
1392 					errno, strerror(errno));
1393 			return -1;
1394 		}
1395 	}
1396 
1397 	return 0;
1398 }
1399 
1400 static int
1401 vfio_type1_dma_map(int vfio_container_fd)
1402 {
1403 	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
1404 		/* with IOVA as VA mode, we can get away with mapping contiguous
1405 		 * chunks rather than going page-by-page.
1406 		 */
1407 		int ret = rte_memseg_contig_walk(type1_map_contig,
1408 				&vfio_container_fd);
1409 		if (ret)
1410 			return ret;
1411 		/* we have to continue the walk because we've skipped the
1412 		 * external segments during the contig walk.
1413 		 */
1414 	}
1415 	return rte_memseg_walk(type1_map, &vfio_container_fd);
1416 }
1417 
1418 /* Track the size of the statically allocated DMA window for SPAPR */
1419 uint64_t spapr_dma_win_len;
1420 uint64_t spapr_dma_win_page_sz;
1421 
1422 static int
1423 vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1424 		uint64_t len, int do_map)
1425 {
1426 	struct vfio_iommu_spapr_register_memory reg = {
1427 		.argsz = sizeof(reg),
1428 		.vaddr = (uintptr_t) vaddr,
1429 		.size = len,
1430 		.flags = 0
1431 	};
1432 	int ret;
1433 
1434 	if (do_map != 0) {
1435 		struct vfio_iommu_type1_dma_map dma_map;
1436 
1437 		if (iova + len > spapr_dma_win_len) {
1438 			RTE_LOG(ERR, EAL, "  dma map attempt outside DMA window\n");
1439 			return -1;
1440 		}
1441 
1442 		ret = ioctl(vfio_container_fd,
1443 				VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
1444 		if (ret) {
1445 			RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, "
1446 				"error %i (%s)\n", errno, strerror(errno));
1447 			return -1;
1448 		}
1449 
1450 		memset(&dma_map, 0, sizeof(dma_map));
1451 		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1452 		dma_map.vaddr = vaddr;
1453 		dma_map.size = len;
1454 		dma_map.iova = iova;
1455 		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1456 				VFIO_DMA_MAP_FLAG_WRITE;
1457 
1458 		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1459 		if (ret) {
1460 			RTE_LOG(ERR, EAL, "  cannot map vaddr for IOMMU, error %i (%s)\n",
1461 				errno, strerror(errno));
1462 			return -1;
1463 		}
1464 
1465 	} else {
1466 		struct vfio_iommu_type1_dma_unmap dma_unmap;
1467 
1468 		memset(&dma_unmap, 0, sizeof(dma_unmap));
1469 		dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1470 		dma_unmap.size = len;
1471 		dma_unmap.iova = iova;
1472 
1473 		ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1474 				&dma_unmap);
1475 		if (ret) {
1476 			RTE_LOG(ERR, EAL, "  cannot unmap vaddr for IOMMU, error %i (%s)\n",
1477 				errno, strerror(errno));
1478 			return -1;
1479 		}
1480 
1481 		ret = ioctl(vfio_container_fd,
1482 				VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
1483 		if (ret) {
1484 			RTE_LOG(ERR, EAL, "  cannot unregister vaddr for IOMMU, error %i (%s)\n",
1485 				errno, strerror(errno));
1486 			return -1;
1487 		}
1488 	}
1489 
1490 	return ret;
1491 }
1492 
1493 static int
1494 vfio_spapr_map_walk(const struct rte_memseg_list *msl,
1495 		const struct rte_memseg *ms, void *arg)
1496 {
1497 	int *vfio_container_fd = arg;
1498 
1499 	/* skip external memory that isn't a heap */
1500 	if (msl->external && !msl->heap)
1501 		return 0;
1502 
1503 	/* skip any segments with invalid IOVA addresses */
1504 	if (ms->iova == RTE_BAD_IOVA)
1505 		return 0;
1506 
1507 	return vfio_spapr_dma_do_map(*vfio_container_fd,
1508 		ms->addr_64, ms->iova, ms->len, 1);
1509 }
1510 
1511 struct spapr_size_walk_param {
1512 	uint64_t max_va;
1513 	uint64_t page_sz;
1514 	bool is_user_managed;
1515 };
1516 
1517 /*
1518  * In order to set the DMA window size required for the SPAPR IOMMU
1519  * we need to walk the existing virtual memory allocations as well as
1520  * find the hugepage size used.
1521  */
1522 static int
1523 vfio_spapr_size_walk(const struct rte_memseg_list *msl, void *arg)
1524 {
1525 	struct spapr_size_walk_param *param = arg;
1526 	uint64_t max = (uint64_t) msl->base_va + (uint64_t) msl->len;
1527 
1528 	if (msl->external && !msl->heap) {
1529 		/* ignore user managed external memory */
1530 		param->is_user_managed = true;
1531 		return 0;
1532 	}
1533 
1534 	if (max > param->max_va) {
1535 		param->page_sz = msl->page_sz;
1536 		param->max_va = max;
1537 	}
1538 
1539 	return 0;
1540 }
1541 
1542 /*
1543  * Find the highest memory address used in physical or virtual address
1544  * space and use that as the top of the DMA window.
1545  */
1546 static int
1547 find_highest_mem_addr(struct spapr_size_walk_param *param)
1548 {
1549 	/* find the maximum IOVA address for setting the DMA window size */
1550 	if (rte_eal_iova_mode() == RTE_IOVA_PA) {
1551 		static const char proc_iomem[] = "/proc/iomem";
1552 		static const char str_sysram[] = "System RAM";
1553 		uint64_t start, end, max = 0;
1554 		char *line = NULL;
1555 		char *dash, *space;
1556 		size_t line_len;
1557 
1558 		/*
1559 		 * Example "System RAM" in /proc/iomem:
1560 		 * 00000000-1fffffffff : System RAM
1561 		 * 200000000000-201fffffffff : System RAM
1562 		 */
1563 		FILE *fd = fopen(proc_iomem, "r");
1564 		if (fd == NULL) {
1565 			RTE_LOG(ERR, EAL, "Cannot open %s\n", proc_iomem);
1566 			return -1;
1567 		}
1568 		/* Scan /proc/iomem for the highest PA in the system */
1569 		while (getline(&line, &line_len, fd) != -1) {
1570 			if (strstr(line, str_sysram) == NULL)
1571 				continue;
1572 
1573 			space = strstr(line, " ");
1574 			dash = strstr(line, "-");
1575 
1576 			/* Validate the format of the memory string */
1577 			if (space == NULL || dash == NULL || space < dash) {
1578 				RTE_LOG(ERR, EAL, "Can't parse line \"%s\" in file %s\n",
1579 					line, proc_iomem);
1580 				continue;
1581 			}
1582 
1583 			start = strtoull(line, NULL, 16);
1584 			end   = strtoull(dash + 1, NULL, 16);
1585 			RTE_LOG(DEBUG, EAL, "Found system RAM from 0x%" PRIx64
1586 				" to 0x%" PRIx64 "\n", start, end);
1587 			if (end > max)
1588 				max = end;
1589 		}
1590 		free(line);
1591 		fclose(fd);
1592 
1593 		if (max == 0) {
1594 			RTE_LOG(ERR, EAL, "Failed to find valid \"System RAM\" "
1595 				"entry in file %s\n", proc_iomem);
1596 			return -1;
1597 		}
1598 
1599 		spapr_dma_win_len = rte_align64pow2(max + 1);
1600 		return 0;
1601 	} else if (rte_eal_iova_mode() == RTE_IOVA_VA) {
1602 		RTE_LOG(DEBUG, EAL, "Highest VA address in memseg list is 0x%"
1603 			PRIx64 "\n", param->max_va);
1604 		spapr_dma_win_len = rte_align64pow2(param->max_va);
1605 		return 0;
1606 	}
1607 
1608 	spapr_dma_win_len = 0;
1609 	RTE_LOG(ERR, EAL, "Unsupported IOVA mode\n");
1610 	return -1;
1611 }
1612 
1613 
1614 /*
1615  * The SPAPRv2 IOMMU supports 2 DMA windows with starting
1616  * address at 0 or 1<<59.  By default, a DMA window is set
1617  * at address 0, 2GB long, with a 4KB page.  For DPDK we
1618  * must remove the default window and setup a new DMA window
1619  * based on the hugepage size and memory requirements of
1620  * the application before we can map memory for DMA.
1621  */
1622 static int
1623 spapr_dma_win_size(void)
1624 {
1625 	struct spapr_size_walk_param param;
1626 
1627 	/* only create DMA window once */
1628 	if (spapr_dma_win_len > 0)
1629 		return 0;
1630 
1631 	/* walk the memseg list to find the page size/max VA address */
1632 	memset(&param, 0, sizeof(param));
1633 	if (rte_memseg_list_walk(vfio_spapr_size_walk, &param) < 0) {
1634 		RTE_LOG(ERR, EAL, "Failed to walk memseg list for DMA window size\n");
1635 		return -1;
1636 	}
1637 
1638 	/* we can't be sure if DMA window covers external memory */
1639 	if (param.is_user_managed)
1640 		RTE_LOG(WARNING, EAL, "Detected user managed external memory which may not be managed by the IOMMU\n");
1641 
1642 	/* check physical/virtual memory size */
1643 	if (find_highest_mem_addr(&param) < 0)
1644 		return -1;
1645 	RTE_LOG(DEBUG, EAL, "Setting DMA window size to 0x%" PRIx64 "\n",
1646 		spapr_dma_win_len);
1647 	spapr_dma_win_page_sz = param.page_sz;
1648 	rte_mem_set_dma_mask(__builtin_ctzll(spapr_dma_win_len));
1649 	return 0;
1650 }
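
/*
 * Example of the sizing logic above, assuming IOVA-as-PA and the
 * /proc/iomem sample shown in find_highest_mem_addr(): the highest
 * "System RAM" range ends at 0x201fffffffff, so max + 1 = 0x202000000000
 * and rte_align64pow2() rounds the DMA window up to 0x400000000000
 * (1ULL << 46). With 16 MB hugepages, __builtin_ctzll(spapr_dma_win_page_sz)
 * later yields a page_shift of 24 for the TCE create ioctl.
 */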
1651 
1652 static int
1653 vfio_spapr_create_dma_window(int vfio_container_fd)
1654 {
1655 	struct vfio_iommu_spapr_tce_create create = {
1656 		.argsz = sizeof(create), };
1657 	struct vfio_iommu_spapr_tce_remove remove = {
1658 		.argsz = sizeof(remove), };
1659 	struct vfio_iommu_spapr_tce_info info = {
1660 		.argsz = sizeof(info), };
1661 	int ret;
1662 
1663 	ret = spapr_dma_win_size();
1664 	if (ret < 0)
1665 		return ret;
1666 
1667 	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1668 	if (ret) {
1669 		RTE_LOG(ERR, EAL, "  can't get iommu info, error %i (%s)\n",
1670 			errno, strerror(errno));
1671 		return -1;
1672 	}
1673 
1674 	/*
1675 	 * sPAPR v1/v2 IOMMU always has a default 1G DMA window set.  The window
1676 	 * can't be changed for v1 but it can be changed for v2. Since DPDK only
1677 	 * supports v2, remove the default DMA window so it can be resized.
1678 	 */
1679 	remove.start_addr = info.dma32_window_start;
1680 	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
1681 	if (ret)
1682 		return -1;
1683 
1684 	/* create a new DMA window (start address is not selectable) */
1685 	create.window_size = spapr_dma_win_len;
1686 	create.page_shift  = __builtin_ctzll(spapr_dma_win_page_sz);
1687 	create.levels = 1;
1688 	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
1689 #ifdef VFIO_IOMMU_SPAPR_INFO_DDW
1690 	/*
1691 	 * The vfio_iommu_spapr_tce_info structure was modified in
1692 	 * Linux kernel 4.2.0 to add support for the
1693 	 * vfio_iommu_spapr_tce_ddw_info structure needed to try
1694 	 * multiple table levels.  Skip the attempt if running with
1695 	 * an older kernel.
1696 	 */
1697 	if (ret) {
1698 		/* if at first we don't succeed, try more levels */
1699 		uint32_t levels;
1700 
1701 		for (levels = create.levels + 1;
1702 			ret && levels <= info.ddw.levels; levels++) {
1703 			create.levels = levels;
1704 			ret = ioctl(vfio_container_fd,
1705 				VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
1706 		}
1707 	}
1708 #endif /* VFIO_IOMMU_SPAPR_INFO_DDW */
1709 	if (ret) {
1710 		RTE_LOG(ERR, EAL, "  cannot create new DMA window, error %i (%s)\n",
1711 			errno, strerror(errno));
1712 		RTE_LOG(ERR, EAL, "  consider using a larger hugepage size "
1713 			"if supported by the system\n");
1714 		return -1;
1715 	}
1716 
1717 	/* verify the start address  */
1718 	if (create.start_addr != 0) {
1719 		RTE_LOG(ERR, EAL, "  received unsupported start address 0x%"
1720 			PRIx64 "\n", (uint64_t)create.start_addr);
1721 		return -1;
1722 	}
1723 	return ret;
1724 }
1725 
1726 static int
1727 vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr,
1728 		uint64_t iova, uint64_t len, int do_map)
1729 {
1730 	int ret = 0;
1731 
1732 	if (do_map) {
1733 		if (vfio_spapr_dma_do_map(vfio_container_fd,
1734 			vaddr, iova, len, 1)) {
1735 			RTE_LOG(ERR, EAL, "Failed to map DMA\n");
1736 			ret = -1;
1737 		}
1738 	} else {
1739 		if (vfio_spapr_dma_do_map(vfio_container_fd,
1740 			vaddr, iova, len, 0)) {
1741 			RTE_LOG(ERR, EAL, "Failed to unmap DMA\n");
1742 			ret = -1;
1743 		}
1744 	}
1745 
1746 	return ret;
1747 }
1748 
1749 static int
1750 vfio_spapr_dma_map(int vfio_container_fd)
1751 {
1752 	if (vfio_spapr_create_dma_window(vfio_container_fd) < 0) {
1753 		RTE_LOG(ERR, EAL, "Could not create new DMA window!\n");
1754 		return -1;
1755 	}
1756 
1757 	/* map all existing DPDK segments for DMA */
1758 	if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
1759 		return -1;
1760 
1761 	return 0;
1762 }
1763 
1764 static int
1765 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
1766 {
1767 	/* No-IOMMU mode does not need DMA mapping */
1768 	return 0;
1769 }
1770 
1771 static int
1772 vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
1773 			 uint64_t __rte_unused vaddr,
1774 			 uint64_t __rte_unused iova, uint64_t __rte_unused len,
1775 			 int __rte_unused do_map)
1776 {
1777 	/* No-IOMMU mode does not need DMA mapping */
1778 	return 0;
1779 }
1780 
1781 static int
1782 vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1783 		uint64_t len, int do_map)
1784 {
1785 	const struct vfio_iommu_type *t = vfio_cfg->vfio_iommu_type;
1786 
1787 	if (!t) {
1788 		RTE_LOG(ERR, EAL, "  VFIO support not initialized\n");
1789 		rte_errno = ENODEV;
1790 		return -1;
1791 	}
1792 
1793 	if (!t->dma_user_map_func) {
1794 		RTE_LOG(ERR, EAL,
1795 			"  VFIO custom DMA region maping not supported by IOMMU %s\n",
1796 			t->name);
1797 		rte_errno = ENOTSUP;
1798 		return -1;
1799 	}
1800 
1801 	return t->dma_user_map_func(vfio_cfg->vfio_container_fd, vaddr, iova,
1802 			len, do_map);
1803 }
1804 
1805 static int
1806 container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1807 		uint64_t len)
1808 {
1809 	struct user_mem_map *new_map;
1810 	struct user_mem_maps *user_mem_maps;
1811 	int ret = 0;
1812 
1813 	user_mem_maps = &vfio_cfg->mem_maps;
1814 	rte_spinlock_recursive_lock(&user_mem_maps->lock);
1815 	if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1816 		RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
1817 		rte_errno = ENOMEM;
1818 		ret = -1;
1819 		goto out;
1820 	}
1821 	/* map the entry */
1822 	if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 1)) {
1823 		/* technically, this will fail if there are currently no devices
1824 		 * plugged in, even though the mapping might have succeeded had a
1825 		 * device been added later. however, since we cannot verify whether
1826 		 * the mapping is valid without a device attached, treat it as
1827 		 * unsupported rather than storing an unverified mapping in the
1828 		 * list of active mappings.
1829 		 */
1830 		RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
1831 		ret = -1;
1832 		goto out;
1833 	}
1834 	/* create new user mem map entry */
1835 	new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1836 	new_map->addr = vaddr;
1837 	new_map->iova = iova;
1838 	new_map->len = len;
1839 
1840 	compact_user_maps(user_mem_maps);
1841 out:
1842 	rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1843 	return ret;
1844 }
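
/*
 * Hedged usage sketch (hypothetical caller, not part of the EAL): exercising
 * the path above through the public wrapper defined later in this file.  The
 * VA==IOVA assumption is for illustration only; a real application must
 * supply a valid IOVA for its memory.
 */
static int __rte_unused
example_user_dma_map(int container_fd, void *buf, size_t len)
{
	uint64_t va = (uint64_t)(uintptr_t)buf;

	if (rte_vfio_container_dma_map(container_fd, va, va, len) < 0) {
		RTE_LOG(ERR, EAL, "example: DMA map failed, rte_errno %d\n",
			rte_errno);
		return -1;
	}
	return 0;
}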
1845 
1846 static int
1847 container_dma_unmap(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1848 		uint64_t len)
1849 {
1850 	struct user_mem_map *map, *new_map = NULL;
1851 	struct user_mem_maps *user_mem_maps;
1852 	int ret = 0;
1853 
1854 	user_mem_maps = &vfio_cfg->mem_maps;
1855 	rte_spinlock_recursive_lock(&user_mem_maps->lock);
1856 
1857 	/* find our mapping */
1858 	map = find_user_mem_map(user_mem_maps, vaddr, iova, len);
1859 	if (!map) {
1860 		RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
1861 		rte_errno = EINVAL;
1862 		ret = -1;
1863 		goto out;
1864 	}
1865 	if (map->addr != vaddr || map->iova != iova || map->len != len) {
1866 		/* we're partially unmapping a previously mapped region, so we
1867 		 * need to split entry into two.
1868 		 */
1869 		if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1870 			RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
1871 			rte_errno = ENOMEM;
1872 			ret = -1;
1873 			goto out;
1874 		}
1875 		new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1876 	}
1877 
1878 	/* unmap the entry */
1879 	if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 0)) {
1880 		/* there may not be any devices plugged in, so unmapping will
1881 		 * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
1882 		 * stop us from removing the mapping, as the assumption is we
1883 		 * won't be needing this memory any more and thus will want to
1884 		 * prevent it from being remapped again on hotplug. so, only
1885 		 * fail if we indeed failed to unmap (e.g. if the mapping was
1886 		 * within our mapped range but had invalid alignment).
1887 		 */
1888 		if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
1889 			RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
1890 			ret = -1;
1891 			goto out;
1892 		} else {
1893 			RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
1894 		}
1895 	}
1896 	/* remove map from the list of active mappings */
1897 	if (new_map != NULL) {
1898 		adjust_map(map, new_map, vaddr, len);
1899 
1900 		/* if we've created a new map by splitting, sort everything */
1901 		if (!is_null_map(new_map)) {
1902 			compact_user_maps(user_mem_maps);
1903 		} else {
1904 			/* we've created a new mapping, but it was unused */
1905 			user_mem_maps->n_maps--;
1906 		}
1907 	} else {
1908 		memset(map, 0, sizeof(*map));
1909 		compact_user_maps(user_mem_maps);
1910 		user_mem_maps->n_maps--;
1911 	}
1912 
1913 out:
1914 	rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1915 	return ret;
1916 }
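
/*
 * Worked example of the partial-unmap split handled above (addresses are
 * illustrative only).  Given a stored mapping
 *
 *	addr = 0x100000, iova = 0x100000, len = 0x300000
 *
 * an unmap request for the middle chunk (vaddr = 0x200000, iova = 0x200000,
 * len = 0x100000) leaves two entries behind:
 *
 *	addr = 0x100000, iova = 0x100000, len = 0x100000
 *	addr = 0x300000, iova = 0x300000, len = 0x100000
 *
 * adjust_map() trims the original entry and, when the hole is in the middle,
 * fills new_map with the trailing piece; compact_user_maps() then re-sorts
 * the list and merges adjacent entries.
 */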
1917 
1918 int
1919 rte_vfio_noiommu_is_enabled(void)
1920 {
1921 	int fd;
1922 	ssize_t cnt;
1923 	char c;
1924 
1925 	fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
1926 	if (fd < 0) {
1927 		if (errno != ENOENT) {
1928 			RTE_LOG(ERR, EAL, "  cannot open vfio noiommu file %i (%s)\n",
1929 					errno, strerror(errno));
1930 			return -1;
1931 		}
1932 		/*
1933 		 * else the file does not exist,
1934 		 * i.e. noiommu is not enabled
1935 		 */
1936 		return 0;
1937 	}
1938 
1939 	cnt = read(fd, &c, 1);
1940 	close(fd);
1941 	if (cnt != 1) {
1942 		RTE_LOG(ERR, EAL, "  unable to read from vfio noiommu "
1943 				"file %i (%s)\n", errno, strerror(errno));
1944 		return -1;
1945 	}
1946 
1947 	return c == 'Y';
1948 }
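
/*
 * Hedged usage sketch (hypothetical caller, not part of the EAL): the return
 * value above is tri-state (-1 on error, 0/1 otherwise), so check for the
 * error case explicitly before treating the result as a boolean.
 */
static void __rte_unused
example_check_noiommu(void)
{
	int ret = rte_vfio_noiommu_is_enabled();

	if (ret < 0)
		RTE_LOG(ERR, EAL, "example: could not query noiommu mode\n");
	else if (ret == 1)
		RTE_LOG(DEBUG, EAL, "example: VFIO runs in No-IOMMU mode, "
			"DMA mappings are not required\n");
	else
		RTE_LOG(DEBUG, EAL, "example: IOMMU protection is active\n");
}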
1949 
1950 int
1951 rte_vfio_container_create(void)
1952 {
1953 	int i;
1954 
1955 	/* Find an empty slot to store new vfio config */
1956 	for (i = 1; i < VFIO_MAX_CONTAINERS; i++) {
1957 		if (vfio_cfgs[i].vfio_container_fd == -1)
1958 			break;
1959 	}
1960 
1961 	if (i == VFIO_MAX_CONTAINERS) {
1962 		RTE_LOG(ERR, EAL, "exceed max vfio container limit\n");
1963 		return -1;
1964 	}
1965 
1966 	vfio_cfgs[i].vfio_container_fd = rte_vfio_get_container_fd();
1967 	if (vfio_cfgs[i].vfio_container_fd < 0) {
1968 		RTE_LOG(NOTICE, EAL, "fail to create a new container\n");
1969 		return -1;
1970 	}
1971 
1972 	return vfio_cfgs[i].vfio_container_fd;
1973 }
1974 
1975 int
1976 rte_vfio_container_destroy(int container_fd)
1977 {
1978 	struct vfio_config *vfio_cfg;
1979 	int i;
1980 
1981 	vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1982 	if (vfio_cfg == NULL) {
1983 		RTE_LOG(ERR, EAL, "Invalid container fd\n");
1984 		return -1;
1985 	}
1986 
1987 	for (i = 0; i < VFIO_MAX_GROUPS; i++)
1988 		if (vfio_cfg->vfio_groups[i].group_num != -1)
1989 			rte_vfio_container_group_unbind(container_fd,
1990 				vfio_cfg->vfio_groups[i].group_num);
1991 
1992 	close(container_fd);
1993 	vfio_cfg->vfio_container_fd = -1;
1994 	vfio_cfg->vfio_active_groups = 0;
1995 	vfio_cfg->vfio_iommu_type = NULL;
1996 
1997 	return 0;
1998 }
1999 
2000 int
2001 rte_vfio_container_group_bind(int container_fd, int iommu_group_num)
2002 {
2003 	struct vfio_config *vfio_cfg;
2004 
2005 	vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2006 	if (vfio_cfg == NULL) {
2007 		RTE_LOG(ERR, EAL, "Invalid container fd\n");
2008 		return -1;
2009 	}
2010 
2011 	return vfio_get_group_fd(vfio_cfg, iommu_group_num);
2012 }
2013 
2014 int
2015 rte_vfio_container_group_unbind(int container_fd, int iommu_group_num)
2016 {
2017 	struct vfio_config *vfio_cfg;
2018 	struct vfio_group *cur_grp = NULL;
2019 	int i;
2020 
2021 	vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2022 	if (vfio_cfg == NULL) {
2023 		RTE_LOG(ERR, EAL, "Invalid container fd\n");
2024 		return -1;
2025 	}
2026 
2027 	for (i = 0; i < VFIO_MAX_GROUPS; i++) {
2028 		if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num) {
2029 			cur_grp = &vfio_cfg->vfio_groups[i];
2030 			break;
2031 		}
2032 	}
2033 
2034 	/* This should not happen */
2035 	if (i == VFIO_MAX_GROUPS || cur_grp == NULL) {
2036 		RTE_LOG(ERR, EAL, "Specified group number not found\n");
2037 		return -1;
2038 	}
2039 
2040 	if (cur_grp->fd >= 0 && close(cur_grp->fd) < 0) {
2041 		RTE_LOG(ERR, EAL, "Error when closing vfio_group_fd for"
2042 			" iommu_group_num %d\n", iommu_group_num);
2043 		return -1;
2044 	}
2045 	cur_grp->group_num = -1;
2046 	cur_grp->fd = -1;
2047 	cur_grp->devices = 0;
2048 	vfio_cfg->vfio_active_groups--;
2049 
2050 	return 0;
2051 }
2052 
2053 int
2054 rte_vfio_container_dma_map(int container_fd, uint64_t vaddr, uint64_t iova,
2055 		uint64_t len)
2056 {
2057 	struct vfio_config *vfio_cfg;
2058 
2059 	if (len == 0) {
2060 		rte_errno = EINVAL;
2061 		return -1;
2062 	}
2063 
2064 	vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2065 	if (vfio_cfg == NULL) {
2066 		RTE_LOG(ERR, EAL, "Invalid container fd\n");
2067 		return -1;
2068 	}
2069 
2070 	return container_dma_map(vfio_cfg, vaddr, iova, len);
2071 }
2072 
2073 int
2074 rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr, uint64_t iova,
2075 		uint64_t len)
2076 {
2077 	struct vfio_config *vfio_cfg;
2078 
2079 	if (len == 0) {
2080 		rte_errno = EINVAL;
2081 		return -1;
2082 	}
2083 
2084 	vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2085 	if (vfio_cfg == NULL) {
2086 		RTE_LOG(ERR, EAL, "Invalid container fd\n");
2087 		return -1;
2088 	}
2089 
2090 	return container_dma_unmap(vfio_cfg, vaddr, iova, len);
2091 }
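
/*
 * Hedged end-to-end sketch of the external container API above (hypothetical
 * caller, not part of the EAL).  The IOMMU group number, buffer address and
 * the VA==IOVA assumption are placeholders; a real application would obtain
 * the group number via rte_vfio_get_group_num() and supply a valid IOVA for
 * its memory.
 */
static int __rte_unused
example_container_lifecycle(int iommu_group_num, void *buf, size_t len)
{
	uint64_t va = (uint64_t)(uintptr_t)buf;
	int container_fd;

	container_fd = rte_vfio_container_create();
	if (container_fd < 0)
		return -1;

	if (rte_vfio_container_group_bind(container_fd, iommu_group_num) < 0)
		goto fail;

	/* assume IOVA-as-VA for the sake of the example */
	if (rte_vfio_container_dma_map(container_fd, va, va, len) < 0)
		goto fail;

	/* ... use the device attached to this container ... */

	rte_vfio_container_dma_unmap(container_fd, va, va, len);
	rte_vfio_container_destroy(container_fd);
	return 0;

fail:
	rte_vfio_container_destroy(container_fd);
	return -1;
}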
2092 
2093 #else
2094 
2095 int
2096 rte_vfio_setup_device(__rte_unused const char *sysfs_base,
2097 		__rte_unused const char *dev_addr,
2098 		__rte_unused int *vfio_dev_fd,
2099 		__rte_unused struct vfio_device_info *device_info)
2100 {
2101 	return -1;
2102 }
2103 
2104 int
2105 rte_vfio_release_device(__rte_unused const char *sysfs_base,
2106 		__rte_unused const char *dev_addr, __rte_unused int fd)
2107 {
2108 	return -1;
2109 }
2110 
2111 int
2112 rte_vfio_enable(__rte_unused const char *modname)
2113 {
2114 	return -1;
2115 }
2116 
2117 int
2118 rte_vfio_is_enabled(__rte_unused const char *modname)
2119 {
2120 	return -1;
2121 }
2122 
2123 int
2124 rte_vfio_noiommu_is_enabled(void)
2125 {
2126 	return -1;
2127 }
2128 
2129 int
2130 rte_vfio_clear_group(__rte_unused int vfio_group_fd)
2131 {
2132 	return -1;
2133 }
2134 
2135 int
2136 rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
2137 		__rte_unused const char *dev_addr,
2138 		__rte_unused int *iommu_group_num)
2139 {
2140 	return -1;
2141 }
2142 
2143 int
2144 rte_vfio_get_container_fd(void)
2145 {
2146 	return -1;
2147 }
2148 
2149 int
2150 rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
2151 {
2152 	return -1;
2153 }
2154 
2155 int
2156 rte_vfio_container_create(void)
2157 {
2158 	return -1;
2159 }
2160 
2161 int
2162 rte_vfio_container_destroy(__rte_unused int container_fd)
2163 {
2164 	return -1;
2165 }
2166 
2167 int
2168 rte_vfio_container_group_bind(__rte_unused int container_fd,
2169 		__rte_unused int iommu_group_num)
2170 {
2171 	return -1;
2172 }
2173 
2174 int
2175 rte_vfio_container_group_unbind(__rte_unused int container_fd,
2176 		__rte_unused int iommu_group_num)
2177 {
2178 	return -1;
2179 }
2180 
2181 int
2182 rte_vfio_container_dma_map(__rte_unused int container_fd,
2183 		__rte_unused uint64_t vaddr,
2184 		__rte_unused uint64_t iova,
2185 		__rte_unused uint64_t len)
2186 {
2187 	return -1;
2188 }
2189 
2190 int
2191 rte_vfio_container_dma_unmap(__rte_unused int container_fd,
2192 		__rte_unused uint64_t vaddr,
2193 		__rte_unused uint64_t iova,
2194 		__rte_unused uint64_t len)
2195 {
2196 	return -1;
2197 }
2198 
2199 #endif /* VFIO_PRESENT */
2200