1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "env_internal.h"
37 
38 #include <rte_config.h>
39 #include <rte_memory.h>
40 #include <rte_eal_memconfig.h>
41 
42 #include "spdk_internal/assert.h"
43 
44 #include "spdk/assert.h"
45 #include "spdk/likely.h"
46 #include "spdk/queue.h"
47 #include "spdk/util.h"
48 #include "spdk/memory.h"
49 #include "spdk/env_dpdk.h"
50 #include "spdk/log.h"
51 
52 #ifndef __linux__
53 #define VFIO_ENABLED 0
54 #else
55 #include <linux/version.h>
56 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
57 #define VFIO_ENABLED 1
58 #include <linux/vfio.h>
59 #include <rte_vfio.h>
60 
61 struct spdk_vfio_dma_map {
62 	struct vfio_iommu_type1_dma_map map;
63 	TAILQ_ENTRY(spdk_vfio_dma_map) tailq;
64 };
65 
66 struct vfio_cfg {
67 	int fd;
68 	bool enabled;
69 	bool noiommu_enabled;
70 	unsigned device_ref;
71 	TAILQ_HEAD(, spdk_vfio_dma_map) maps;
72 	pthread_mutex_t mutex;
73 };
74 
75 static struct vfio_cfg g_vfio = {
76 	.fd = -1,
77 	.enabled = false,
78 	.noiommu_enabled = false,
79 	.device_ref = 0,
80 	.maps = TAILQ_HEAD_INITIALIZER(g_vfio.maps),
81 	.mutex = PTHREAD_MUTEX_INITIALIZER
82 };
83 
84 #else
85 #define VFIO_ENABLED 0
86 #endif
87 #endif
88 
89 #if DEBUG
90 #define DEBUG_PRINT(...) SPDK_ERRLOG(__VA_ARGS__)
91 #else
92 #define DEBUG_PRINT(...)
93 #endif
94 
95 #define FN_2MB_TO_4KB(fn)	(fn << (SHIFT_2MB - SHIFT_4KB))
96 #define FN_4KB_TO_2MB(fn)	(fn >> (SHIFT_2MB - SHIFT_4KB))
97 
98 #define MAP_256TB_IDX(vfn_2mb)	((vfn_2mb) >> (SHIFT_1GB - SHIFT_2MB))
99 #define MAP_1GB_IDX(vfn_2mb)	((vfn_2mb) & ((1ULL << (SHIFT_1GB - SHIFT_2MB)) - 1))
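/* Illustrative index decomposition (hypothetical address, for illustration only):
 * for vaddr = 0x7f0040a00000,
 *   vfn_2mb = vaddr >> SHIFT_2MB   = 0x3f80205
 *   MAP_256TB_IDX(vfn_2mb)         = 0x1fc01  (bits [30..47] of vaddr)
 *   MAP_1GB_IDX(vfn_2mb)           = 0x5      (bits [21..29] of vaddr)
 */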
100 
101 /* Page is registered */
102 #define REG_MAP_REGISTERED	(1ULL << 62)
103 
104 /* A notification region barrier. The 2MB translation entry that's marked
105  * with this flag must be unregistered separately. This allows contiguous
106  * regions to be unregistered in the same chunks they were registered.
107  */
108 #define REG_MAP_NOTIFY_START	(1ULL << 63)
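/* Illustrative encoding example (hypothetical 6MB registration): the first 2MB
 * entry is set to (REG_MAP_REGISTERED | REG_MAP_NOTIFY_START) and the remaining
 * two entries to REG_MAP_REGISTERED only, so the range is later unregistered as
 * one chunk starting at the NOTIFY_START boundary.
 */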
109 
110 /* Translation of a single 2MB page. */
111 struct map_2mb {
112 	uint64_t translation_2mb;
113 };
114 
115 /* Second-level map table indexed by bits [21..29] of the virtual address.
116  * Each entry holds the address translation, or the map's default
117  * translation for entries that have not been set yet.
118  */
119 struct map_1gb {
120 	struct map_2mb map[1ULL << (SHIFT_1GB - SHIFT_2MB)];
121 };
122 
123 /* Top-level map table indexed by bits [30..47] of the virtual address.
124  * Each entry points to a second-level map table or NULL.
125  */
126 struct map_256tb {
127 	struct map_1gb *map[1ULL << (SHIFT_256TB - SHIFT_1GB)];
128 };
129 
130 /* Page-granularity memory address translation */
131 struct spdk_mem_map {
132 	struct map_256tb map_256tb;
133 	pthread_mutex_t mutex;
134 	uint64_t default_translation;
135 	struct spdk_mem_map_ops ops;
136 	void *cb_ctx;
137 	TAILQ_ENTRY(spdk_mem_map) tailq;
138 };
139 
140 /* Registrations map. The 64-bit translations are bit fields with the
141  * following layout (starting with the low bits):
142  *    0 - 61 : reserved
143  *   62 - 63 : flags
144  */
145 static struct spdk_mem_map *g_mem_reg_map;
146 static TAILQ_HEAD(spdk_mem_map_head, spdk_mem_map) g_spdk_mem_maps =
147 	TAILQ_HEAD_INITIALIZER(g_spdk_mem_maps);
148 static pthread_mutex_t g_spdk_mem_map_mutex = PTHREAD_MUTEX_INITIALIZER;
149 
150 static bool g_legacy_mem;
151 
152 /*
153  * Walk the currently registered memory via the main memory registration map
154  * and call the new map's notify callback for each virtually contiguous region.
155  */
156 static int
157 mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_action action)
158 {
159 	size_t idx_256tb;
160 	uint64_t idx_1gb;
161 	uint64_t contig_start = UINT64_MAX;
162 	uint64_t contig_end = UINT64_MAX;
163 	struct map_1gb *map_1gb;
164 	int rc;
165 
166 	if (!g_mem_reg_map) {
167 		return -EINVAL;
168 	}
169 
170 	/* Hold the memory registration map mutex so no new registrations can be added while we are looping. */
171 	pthread_mutex_lock(&g_mem_reg_map->mutex);
172 
173 	for (idx_256tb = 0;
174 	     idx_256tb < sizeof(g_mem_reg_map->map_256tb.map) / sizeof(g_mem_reg_map->map_256tb.map[0]);
175 	     idx_256tb++) {
176 		map_1gb = g_mem_reg_map->map_256tb.map[idx_256tb];
177 
178 		if (!map_1gb) {
179 			if (contig_start != UINT64_MAX) {
180 				/* End of a virtually contiguous range */
181 				rc = map->ops.notify_cb(map->cb_ctx, map, action,
182 							(void *)contig_start,
183 							contig_end - contig_start + VALUE_2MB);
184 				/* Don't bother handling unregister failures; nothing useful can be done about them here. */
185 				if (rc != 0 && action == SPDK_MEM_MAP_NOTIFY_REGISTER) {
186 					goto err_unregister;
187 				}
188 			}
189 			contig_start = UINT64_MAX;
190 			continue;
191 		}
192 
193 		for (idx_1gb = 0; idx_1gb < sizeof(map_1gb->map) / sizeof(map_1gb->map[0]); idx_1gb++) {
194 			if ((map_1gb->map[idx_1gb].translation_2mb & REG_MAP_REGISTERED) &&
195 			    (contig_start == UINT64_MAX ||
196 			     (map_1gb->map[idx_1gb].translation_2mb & REG_MAP_NOTIFY_START) == 0)) {
197 				/* Rebuild the virtual address from the indexes */
198 				uint64_t vaddr = (idx_256tb << SHIFT_1GB) | (idx_1gb << SHIFT_2MB);
199 
200 				if (contig_start == UINT64_MAX) {
201 					contig_start = vaddr;
202 				}
203 
204 				contig_end = vaddr;
205 			} else {
206 				if (contig_start != UINT64_MAX) {
207 					/* End of a virtually contiguous range */
208 					rc = map->ops.notify_cb(map->cb_ctx, map, action,
209 								(void *)contig_start,
210 								contig_end - contig_start + VALUE_2MB);
211 					/* Don't bother handling unregister failures; nothing useful can be done about them here. */
212 					if (rc != 0 && action == SPDK_MEM_MAP_NOTIFY_REGISTER) {
213 						goto err_unregister;
214 					}
215 
216 					/* This page might be a part of a neighbour region, so process
217 					 * it again. The idx_1gb will be incremented immediately.
218 					 */
219 					idx_1gb--;
220 				}
221 				contig_start = UINT64_MAX;
222 			}
223 		}
224 	}
225 
226 	pthread_mutex_unlock(&g_mem_reg_map->mutex);
227 	return 0;
228 
229 err_unregister:
230 	/* Unwind to the first empty translation so we don't unregister
231 	 * a region that just failed to register.
232 	 */
233 	idx_256tb = MAP_256TB_IDX((contig_start >> SHIFT_2MB) - 1);
234 	idx_1gb = MAP_1GB_IDX((contig_start >> SHIFT_2MB) - 1);
235 	contig_start = UINT64_MAX;
236 	contig_end = UINT64_MAX;
237 
238 	/* Unregister any memory we managed to register before the failure */
239 	for (; idx_256tb < SIZE_MAX; idx_256tb--) {
240 		map_1gb = g_mem_reg_map->map_256tb.map[idx_256tb];
241 
242 		if (!map_1gb) {
243 			if (contig_end != UINT64_MAX) {
244 				/* End of a virtually contiguous range */
245 				map->ops.notify_cb(map->cb_ctx, map,
246 						   SPDK_MEM_MAP_NOTIFY_UNREGISTER,
247 						   (void *)contig_start,
248 						   contig_end - contig_start + VALUE_2MB);
249 			}
250 			contig_end = UINT64_MAX;
251 			continue;
252 		}
253 
254 		for (; idx_1gb < UINT64_MAX; idx_1gb--) {
255 			if ((map_1gb->map[idx_1gb].translation_2mb & REG_MAP_REGISTERED) &&
256 			    (contig_end == UINT64_MAX || (map_1gb->map[idx_1gb].translation_2mb & REG_MAP_NOTIFY_START) == 0)) {
257 				/* Rebuild the virtual address from the indexes */
258 				uint64_t vaddr = (idx_256tb << SHIFT_1GB) | (idx_1gb << SHIFT_2MB);
259 
260 				if (contig_end == UINT64_MAX) {
261 					contig_end = vaddr;
262 				}
263 				contig_start = vaddr;
264 			} else {
265 				if (contig_end != UINT64_MAX) {
266 					/* End of a virtually contiguous range */
267 					map->ops.notify_cb(map->cb_ctx, map,
268 							   SPDK_MEM_MAP_NOTIFY_UNREGISTER,
269 							   (void *)contig_start,
270 							   contig_end - contig_start + VALUE_2MB);
271 					idx_1gb++;
272 				}
273 				contig_end = UINT64_MAX;
274 			}
275 		}
276 		idx_1gb = sizeof(map_1gb->map) / sizeof(map_1gb->map[0]) - 1;
277 	}
278 
279 	pthread_mutex_unlock(&g_mem_reg_map->mutex);
280 	return rc;
281 }
282 
283 struct spdk_mem_map *
284 spdk_mem_map_alloc(uint64_t default_translation, const struct spdk_mem_map_ops *ops, void *cb_ctx)
285 {
286 	struct spdk_mem_map *map;
287 	int rc;
288 
289 	map = calloc(1, sizeof(*map));
290 	if (map == NULL) {
291 		return NULL;
292 	}
293 
294 	if (pthread_mutex_init(&map->mutex, NULL)) {
295 		free(map);
296 		return NULL;
297 	}
298 
299 	map->default_translation = default_translation;
300 	map->cb_ctx = cb_ctx;
301 	if (ops) {
302 		map->ops = *ops;
303 	}
304 
305 	if (ops && ops->notify_cb) {
306 		pthread_mutex_lock(&g_spdk_mem_map_mutex);
307 		rc = mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_REGISTER);
308 		if (rc != 0) {
309 			pthread_mutex_unlock(&g_spdk_mem_map_mutex);
310 			DEBUG_PRINT("Initial mem_map notify failed\n");
311 			pthread_mutex_destroy(&map->mutex);
312 			free(map);
313 			return NULL;
314 		}
315 		TAILQ_INSERT_TAIL(&g_spdk_mem_maps, map, tailq);
316 		pthread_mutex_unlock(&g_spdk_mem_map_mutex);
317 	}
318 
319 	return map;
320 }
321 
322 void
323 spdk_mem_map_free(struct spdk_mem_map **pmap)
324 {
325 	struct spdk_mem_map *map;
326 	size_t i;
327 
328 	if (!pmap) {
329 		return;
330 	}
331 
332 	map = *pmap;
333 
334 	if (!map) {
335 		return;
336 	}
337 
338 	if (map->ops.notify_cb) {
339 		pthread_mutex_lock(&g_spdk_mem_map_mutex);
340 		mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_UNREGISTER);
341 		TAILQ_REMOVE(&g_spdk_mem_maps, map, tailq);
342 		pthread_mutex_unlock(&g_spdk_mem_map_mutex);
343 	}
344 
345 	for (i = 0; i < sizeof(map->map_256tb.map) / sizeof(map->map_256tb.map[0]); i++) {
346 		free(map->map_256tb.map[i]);
347 	}
348 
349 	pthread_mutex_destroy(&map->mutex);
350 
351 	free(map);
352 	*pmap = NULL;
353 }
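/* Illustrative usage sketch for the map API (hypothetical callback names, not
 * part of this file):
 *
 *	static int
 *	my_notify(void *cb_ctx, struct spdk_mem_map *map,
 *		  enum spdk_mem_map_notify_action action, void *vaddr, size_t len)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct spdk_mem_map_ops my_ops = {
 *		.notify_cb = my_notify,
 *		.are_contiguous = NULL,
 *	};
 *
 *	struct spdk_mem_map *map = spdk_mem_map_alloc(SPDK_VTOPHYS_ERROR, &my_ops, NULL);
 *	...
 *	spdk_mem_map_free(&map);	(also resets map back to NULL)
 */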
354 
355 int
356 spdk_mem_register(void *vaddr, size_t len)
357 {
358 	struct spdk_mem_map *map;
359 	int rc;
360 	void *seg_vaddr;
361 	size_t seg_len;
362 	uint64_t reg;
363 
364 	if ((uintptr_t)vaddr & ~MASK_256TB) {
365 		DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
366 		return -EINVAL;
367 	}
368 
369 	if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
370 		DEBUG_PRINT("invalid %s parameters, vaddr=%p len=%ju\n",
371 			    __func__, vaddr, len);
372 		return -EINVAL;
373 	}
374 
375 	if (len == 0) {
376 		return 0;
377 	}
378 
379 	pthread_mutex_lock(&g_spdk_mem_map_mutex);
380 
381 	seg_vaddr = vaddr;
382 	seg_len = len;
383 	while (seg_len > 0) {
384 		reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
385 		if (reg & REG_MAP_REGISTERED) {
386 			pthread_mutex_unlock(&g_spdk_mem_map_mutex);
387 			return -EBUSY;
388 		}
389 		seg_vaddr += VALUE_2MB;
390 		seg_len -= VALUE_2MB;
391 	}
392 
393 	seg_vaddr = vaddr;
394 	seg_len = 0;
395 	while (len > 0) {
396 		spdk_mem_map_set_translation(g_mem_reg_map, (uint64_t)vaddr, VALUE_2MB,
397 					     seg_len == 0 ? REG_MAP_REGISTERED | REG_MAP_NOTIFY_START : REG_MAP_REGISTERED);
398 		seg_len += VALUE_2MB;
399 		vaddr += VALUE_2MB;
400 		len -= VALUE_2MB;
401 	}
402 
403 	TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
404 		rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_REGISTER, seg_vaddr, seg_len);
405 		if (rc != 0) {
406 			pthread_mutex_unlock(&g_spdk_mem_map_mutex);
407 			return rc;
408 		}
409 	}
410 
411 	pthread_mutex_unlock(&g_spdk_mem_map_mutex);
412 	return 0;
413 }
414 
415 int
416 spdk_mem_unregister(void *vaddr, size_t len)
417 {
418 	struct spdk_mem_map *map;
419 	int rc;
420 	void *seg_vaddr;
421 	size_t seg_len;
422 	uint64_t reg, newreg;
423 
424 	if ((uintptr_t)vaddr & ~MASK_256TB) {
425 		DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
426 		return -EINVAL;
427 	}
428 
429 	if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
430 		DEBUG_PRINT("invalid %s parameters, vaddr=%p len=%ju\n",
431 			    __func__, vaddr, len);
432 		return -EINVAL;
433 	}
434 
435 	pthread_mutex_lock(&g_spdk_mem_map_mutex);
436 
437 	/* The first page must be a start of a region. Also check if it's
438 	 * registered to make sure we don't return -ERANGE for non-registered
439 	 * regions.
440 	 */
441 	reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)vaddr, NULL);
442 	if ((reg & REG_MAP_REGISTERED) && (reg & REG_MAP_NOTIFY_START) == 0) {
443 		pthread_mutex_unlock(&g_spdk_mem_map_mutex);
444 		return -ERANGE;
445 	}
446 
447 	seg_vaddr = vaddr;
448 	seg_len = len;
449 	while (seg_len > 0) {
450 		reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
451 		if ((reg & REG_MAP_REGISTERED) == 0) {
452 			pthread_mutex_unlock(&g_spdk_mem_map_mutex);
453 			return -EINVAL;
454 		}
455 		seg_vaddr += VALUE_2MB;
456 		seg_len -= VALUE_2MB;
457 	}
458 
459 	newreg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
460 	/* If the next page is registered, it must be a start of a region as well,
461 	 * otherwise we'd be unregistering only a part of a region.
462 	 */
463 	if ((newreg & REG_MAP_NOTIFY_START) == 0 && (newreg & REG_MAP_REGISTERED)) {
464 		pthread_mutex_unlock(&g_spdk_mem_map_mutex);
465 		return -ERANGE;
466 	}
467 	seg_vaddr = vaddr;
468 	seg_len = 0;
469 
470 	while (len > 0) {
471 		reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)vaddr, NULL);
472 		spdk_mem_map_set_translation(g_mem_reg_map, (uint64_t)vaddr, VALUE_2MB, 0);
473 
474 		if (seg_len > 0 && (reg & REG_MAP_NOTIFY_START)) {
475 			TAILQ_FOREACH_REVERSE(map, &g_spdk_mem_maps, spdk_mem_map_head, tailq) {
476 				rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER, seg_vaddr, seg_len);
477 				if (rc != 0) {
478 					pthread_mutex_unlock(&g_spdk_mem_map_mutex);
479 					return rc;
480 				}
481 			}
482 
483 			seg_vaddr = vaddr;
484 			seg_len = VALUE_2MB;
485 		} else {
486 			seg_len += VALUE_2MB;
487 		}
488 
489 		vaddr += VALUE_2MB;
490 		len -= VALUE_2MB;
491 	}
492 
493 	if (seg_len > 0) {
494 		TAILQ_FOREACH_REVERSE(map, &g_spdk_mem_maps, spdk_mem_map_head, tailq) {
495 			rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER, seg_vaddr, seg_len);
496 			if (rc != 0) {
497 				pthread_mutex_unlock(&g_spdk_mem_map_mutex);
498 				return rc;
499 			}
500 		}
501 	}
502 
503 	pthread_mutex_unlock(&g_spdk_mem_map_mutex);
504 	return 0;
505 }
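/* Illustrative usage sketch (hypothetical 2MB-aligned buffer obtained elsewhere,
 * e.g. from a hugepage mmap):
 *
 *	if (spdk_mem_register(buf, 4 * VALUE_2MB) == 0) {
 *		... every registered map's notify_cb has seen this range ...
 *		spdk_mem_unregister(buf, 4 * VALUE_2MB);
 *	}
 *
 * Both the address and the length must be multiples of VALUE_2MB, and a range
 * must be unregistered in the same chunks it was registered with.
 */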
506 
507 int
508 spdk_mem_reserve(void *vaddr, size_t len)
509 {
510 	struct spdk_mem_map *map;
511 	void *seg_vaddr;
512 	size_t seg_len;
513 	uint64_t reg;
514 
515 	if ((uintptr_t)vaddr & ~MASK_256TB) {
516 		DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
517 		return -EINVAL;
518 	}
519 
520 	if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
521 		DEBUG_PRINT("invalid %s parameters, vaddr=%p len=%ju\n",
522 			    __func__, vaddr, len);
523 		return -EINVAL;
524 	}
525 
526 	if (len == 0) {
527 		return 0;
528 	}
529 
530 	pthread_mutex_lock(&g_spdk_mem_map_mutex);
531 
532 	/* Check if any part of this range is already registered */
533 	seg_vaddr = vaddr;
534 	seg_len = len;
535 	while (seg_len > 0) {
536 		reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
537 		if (reg & REG_MAP_REGISTERED) {
538 			pthread_mutex_unlock(&g_spdk_mem_map_mutex);
539 			return -EBUSY;
540 		}
541 		seg_vaddr += VALUE_2MB;
542 		seg_len -= VALUE_2MB;
543 	}
544 
545 	/* Simply set the translation to the memory map's default. This allocates the space in the
546 	 * map but does not provide a valid translation. */
547 	spdk_mem_map_set_translation(g_mem_reg_map, (uint64_t)vaddr, len,
548 				     g_mem_reg_map->default_translation);
549 
550 	TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
551 		spdk_mem_map_set_translation(map, (uint64_t)vaddr, len, map->default_translation);
552 	}
553 
554 	pthread_mutex_unlock(&g_spdk_mem_map_mutex);
555 	return 0;
556 }
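/* Illustrative usage sketch (hypothetical address range): reserving writes the
 * default translation into every registered map, pre-allocating map space
 * without invoking any notify callbacks or providing a valid translation:
 *
 *	if (spdk_mem_reserve(addr, 4 * VALUE_2MB) == -EBUSY) {
 *		... part of the range was already registered ...
 *	}
 */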
557 
558 static struct map_1gb *
559 mem_map_get_map_1gb(struct spdk_mem_map *map, uint64_t vfn_2mb)
560 {
561 	struct map_1gb *map_1gb;
562 	uint64_t idx_256tb = MAP_256TB_IDX(vfn_2mb);
563 	size_t i;
564 
565 	if (spdk_unlikely(idx_256tb >= SPDK_COUNTOF(map->map_256tb.map))) {
566 		return NULL;
567 	}
568 
569 	map_1gb = map->map_256tb.map[idx_256tb];
570 
571 	if (!map_1gb) {
572 		pthread_mutex_lock(&map->mutex);
573 
574 		/* Recheck to make sure nobody else got the mutex first. */
575 		map_1gb = map->map_256tb.map[idx_256tb];
576 		if (!map_1gb) {
577 			map_1gb = malloc(sizeof(struct map_1gb));
578 			if (map_1gb) {
579 				/* initialize all entries to default translation */
580 				for (i = 0; i < SPDK_COUNTOF(map_1gb->map); i++) {
581 					map_1gb->map[i].translation_2mb = map->default_translation;
582 				}
583 				map->map_256tb.map[idx_256tb] = map_1gb;
584 			}
585 		}
586 
587 		pthread_mutex_unlock(&map->mutex);
588 
589 		if (!map_1gb) {
590 			DEBUG_PRINT("allocation failed\n");
591 			return NULL;
592 		}
593 	}
594 
595 	return map_1gb;
596 }
597 
598 int
599 spdk_mem_map_set_translation(struct spdk_mem_map *map, uint64_t vaddr, uint64_t size,
600 			     uint64_t translation)
601 {
602 	uint64_t vfn_2mb;
603 	struct map_1gb *map_1gb;
604 	uint64_t idx_1gb;
605 	struct map_2mb *map_2mb;
606 
607 	if ((uintptr_t)vaddr & ~MASK_256TB) {
608 		DEBUG_PRINT("invalid usermode virtual address %" PRIu64 "\n", vaddr);
609 		return -EINVAL;
610 	}
611 
612 	/* For now, only 2 MB-aligned registrations are supported */
613 	if (((uintptr_t)vaddr & MASK_2MB) || (size & MASK_2MB)) {
614 		DEBUG_PRINT("invalid %s parameters, vaddr=%" PRIu64 " len=%" PRIu64 "\n",
615 			    __func__, vaddr, size);
616 		return -EINVAL;
617 	}
618 
619 	vfn_2mb = vaddr >> SHIFT_2MB;
620 
621 	while (size) {
622 		map_1gb = mem_map_get_map_1gb(map, vfn_2mb);
623 		if (!map_1gb) {
624 			DEBUG_PRINT("could not get %p map\n", (void *)vaddr);
625 			return -ENOMEM;
626 		}
627 
628 		idx_1gb = MAP_1GB_IDX(vfn_2mb);
629 		map_2mb = &map_1gb->map[idx_1gb];
630 		map_2mb->translation_2mb = translation;
631 
632 		size -= VALUE_2MB;
633 		vfn_2mb++;
634 	}
635 
636 	return 0;
637 }
638 
639 int
640 spdk_mem_map_clear_translation(struct spdk_mem_map *map, uint64_t vaddr, uint64_t size)
641 {
642 	return spdk_mem_map_set_translation(map, vaddr, size, map->default_translation);
643 }
644 
645 inline uint64_t
646 spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
647 {
648 	const struct map_1gb *map_1gb;
649 	const struct map_2mb *map_2mb;
650 	uint64_t idx_256tb;
651 	uint64_t idx_1gb;
652 	uint64_t vfn_2mb;
653 	uint64_t cur_size;
654 	uint64_t prev_translation;
655 	uint64_t orig_translation;
656 
657 	if (spdk_unlikely(vaddr & ~MASK_256TB)) {
658 		DEBUG_PRINT("invalid usermode virtual address %p\n", (void *)vaddr);
659 		return map->default_translation;
660 	}
661 
662 	vfn_2mb = vaddr >> SHIFT_2MB;
663 	idx_256tb = MAP_256TB_IDX(vfn_2mb);
664 	idx_1gb = MAP_1GB_IDX(vfn_2mb);
665 
666 	map_1gb = map->map_256tb.map[idx_256tb];
667 	if (spdk_unlikely(!map_1gb)) {
668 		return map->default_translation;
669 	}
670 
671 	cur_size = VALUE_2MB - _2MB_OFFSET(vaddr);
672 	map_2mb = &map_1gb->map[idx_1gb];
673 	if (size == NULL || map->ops.are_contiguous == NULL ||
674 	    map_2mb->translation_2mb == map->default_translation) {
675 		if (size != NULL) {
676 			*size = spdk_min(*size, cur_size);
677 		}
678 		return map_2mb->translation_2mb;
679 	}
680 
681 	orig_translation = map_2mb->translation_2mb;
682 	prev_translation = orig_translation;
683 	while (cur_size < *size) {
684 		vfn_2mb++;
685 		idx_256tb = MAP_256TB_IDX(vfn_2mb);
686 		idx_1gb = MAP_1GB_IDX(vfn_2mb);
687 
688 		map_1gb = map->map_256tb.map[idx_256tb];
689 		if (spdk_unlikely(!map_1gb)) {
690 			break;
691 		}
692 
693 		map_2mb = &map_1gb->map[idx_1gb];
694 		if (!map->ops.are_contiguous(prev_translation, map_2mb->translation_2mb)) {
695 			break;
696 		}
697 
698 		cur_size += VALUE_2MB;
699 		prev_translation = map_2mb->translation_2mb;
700 	}
701 
702 	*size = spdk_min(*size, cur_size);
703 	return orig_translation;
704 }
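/* Illustrative lookup sketch (hypothetical buffer, shown against the vtophys map
 * defined later in this file): *size is both an input (bytes of interest) and an
 * output (bytes covered by one contiguous translation):
 *
 *	uint64_t len = io_size;
 *	uint64_t paddr = spdk_mem_map_translate(g_vtophys_map, (uint64_t)buf, &len);
 *	if (paddr != SPDK_VTOPHYS_ERROR) {
 *		... at most len bytes starting at buf translate contiguously ...
 *	}
 */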
705 
706 static void
707 memory_hotplug_cb(enum rte_mem_event event_type,
708 		  const void *addr, size_t len, void *arg)
709 {
710 	if (event_type == RTE_MEM_EVENT_ALLOC) {
711 		spdk_mem_register((void *)addr, len);
712 
713 		if (!spdk_env_dpdk_external_init()) {
714 			return;
715 		}
716 
717 		/* When the user initialized DPDK separately, we can't
718 		 * be sure that --match-allocations RTE flag was specified.
719 		 * Without this flag, DPDK can free memory in different units
720 		 * than it was allocated. It doesn't work with things like RDMA MRs.
721 		 *
722 		 * For such cases, we mark segments so they aren't freed.
723 		 */
724 		while (len > 0) {
725 			struct rte_memseg *seg;
726 
727 			seg = rte_mem_virt2memseg(addr, NULL);
728 			assert(seg != NULL);
729 			seg->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
730 			addr = (void *)((uintptr_t)addr + seg->hugepage_sz);
731 			len -= seg->hugepage_sz;
732 		}
733 	} else if (event_type == RTE_MEM_EVENT_FREE) {
734 		spdk_mem_unregister((void *)addr, len);
735 	}
736 }
737 
738 static int
739 memory_iter_cb(const struct rte_memseg_list *msl,
740 	       const struct rte_memseg *ms, size_t len, void *arg)
741 {
742 	return spdk_mem_register(ms->addr, len);
743 }
744 
745 int
746 mem_map_init(bool legacy_mem)
747 {
748 	g_legacy_mem = legacy_mem;
749 
750 	g_mem_reg_map = spdk_mem_map_alloc(0, NULL, NULL);
751 	if (g_mem_reg_map == NULL) {
752 		DEBUG_PRINT("memory registration map allocation failed\n");
753 		return -ENOMEM;
754 	}
755 
756 	/*
757 	 * Walk all DPDK memory segments and register them
758 	 * with the main memory map
759 	 */
760 	rte_mem_event_callback_register("spdk", memory_hotplug_cb, NULL);
761 	rte_memseg_contig_walk(memory_iter_cb, NULL);
762 	return 0;
763 }
764 
765 bool
766 spdk_iommu_is_enabled(void)
767 {
768 #if VFIO_ENABLED
769 	return g_vfio.enabled && !g_vfio.noiommu_enabled;
770 #else
771 	return false;
772 #endif
773 }
774 
775 struct spdk_vtophys_pci_device {
776 	struct rte_pci_device *pci_device;
777 	TAILQ_ENTRY(spdk_vtophys_pci_device) tailq;
778 };
779 
780 static pthread_mutex_t g_vtophys_pci_devices_mutex = PTHREAD_MUTEX_INITIALIZER;
781 static TAILQ_HEAD(, spdk_vtophys_pci_device) g_vtophys_pci_devices =
782 	TAILQ_HEAD_INITIALIZER(g_vtophys_pci_devices);
783 
784 static struct spdk_mem_map *g_vtophys_map;
785 static struct spdk_mem_map *g_phys_ref_map;
786 
787 #if VFIO_ENABLED
788 static int
789 vtophys_iommu_map_dma(uint64_t vaddr, uint64_t iova, uint64_t size)
790 {
791 	struct spdk_vfio_dma_map *dma_map;
792 	uint64_t refcount;
793 	int ret;
794 
795 	refcount = spdk_mem_map_translate(g_phys_ref_map, iova, NULL);
796 	assert(refcount < UINT64_MAX);
797 	if (refcount > 0) {
798 		spdk_mem_map_set_translation(g_phys_ref_map, iova, size, refcount + 1);
799 		return 0;
800 	}
801 
802 	dma_map = calloc(1, sizeof(*dma_map));
803 	if (dma_map == NULL) {
804 		return -ENOMEM;
805 	}
806 
807 	dma_map->map.argsz = sizeof(dma_map->map);
808 	dma_map->map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
809 	dma_map->map.vaddr = vaddr;
810 	dma_map->map.iova = iova;
811 	dma_map->map.size = size;
812 
813 	pthread_mutex_lock(&g_vfio.mutex);
814 	if (g_vfio.device_ref == 0) {
815 		/* VFIO requires at least one device (IOMMU group) to be added to
816 		 * a VFIO container before it is possible to perform any IOMMU
817 		 * operations on that container. This memory will be mapped once
818 		 * the first device (IOMMU group) is hotplugged.
819 		 *
820 		 * Since the vfio container is managed internally by DPDK, it is
821 		 * also possible that some device is already in that container, but
822 		 * it's not managed by SPDK - e.g. a NIC attached internally
823 		 * inside DPDK. We could map the memory straight away in such a
824 		 * scenario, but there's no need to do it. DPDK devices clearly
825 		 * don't need our mappings and hence we defer the mapping
826 		 * unconditionally until the first SPDK-managed device is
827 		 * hotplugged.
828 		 */
829 		goto out_insert;
830 	}
831 
832 	ret = ioctl(g_vfio.fd, VFIO_IOMMU_MAP_DMA, &dma_map->map);
833 	if (ret) {
834 		DEBUG_PRINT("Cannot set up DMA mapping, error %d\n", errno);
835 		pthread_mutex_unlock(&g_vfio.mutex);
836 		free(dma_map);
837 		return ret;
838 	}
839 
840 out_insert:
841 	TAILQ_INSERT_TAIL(&g_vfio.maps, dma_map, tailq);
842 	pthread_mutex_unlock(&g_vfio.mutex);
843 	spdk_mem_map_set_translation(g_phys_ref_map, iova, size, refcount + 1);
844 	return 0;
845 }
846 
847 static int
848 vtophys_iommu_unmap_dma(uint64_t iova, uint64_t size)
849 {
850 	struct spdk_vfio_dma_map *dma_map;
851 	uint64_t refcount;
852 	int ret;
853 	struct vfio_iommu_type1_dma_unmap unmap = {};
854 
855 	pthread_mutex_lock(&g_vfio.mutex);
856 	TAILQ_FOREACH(dma_map, &g_vfio.maps, tailq) {
857 		if (dma_map->map.iova == iova) {
858 			break;
859 		}
860 	}
861 
862 	if (dma_map == NULL) {
863 		DEBUG_PRINT("Cannot clear DMA mapping for IOVA %"PRIx64" - it's not mapped\n", iova);
864 		pthread_mutex_unlock(&g_vfio.mutex);
865 		return -ENXIO;
866 	}
867 
868 	refcount = spdk_mem_map_translate(g_phys_ref_map, iova, NULL);
869 	assert(refcount < UINT64_MAX);
870 	if (refcount > 0) {
871 		spdk_mem_map_set_translation(g_phys_ref_map, iova, size, refcount - 1);
872 	}
873 
874 	/* If there are still outstanding references, don't clear the DMA mapping. */
875 	if (refcount > 1) {
876 		pthread_mutex_unlock(&g_vfio.mutex);
877 		return 0;
878 	}
879 
880 	/* Partial or multi-page unmaps are not supported for now. */
881 	assert(dma_map->map.size == size);
882 
883 	if (g_vfio.device_ref == 0) {
884 		/* Memory is not mapped anymore, just remove its references */
885 		goto out_remove;
886 	}
887 
888 	unmap.argsz = sizeof(unmap);
889 	unmap.flags = 0;
890 	unmap.iova = dma_map->map.iova;
891 	unmap.size = dma_map->map.size;
892 	ret = ioctl(g_vfio.fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
893 	if (ret) {
894 		DEBUG_PRINT("Cannot clear DMA mapping, error %d\n", errno);
895 		pthread_mutex_unlock(&g_vfio.mutex);
896 		return ret;
897 	}
898 
899 out_remove:
900 	TAILQ_REMOVE(&g_vfio.maps, dma_map, tailq);
901 	pthread_mutex_unlock(&g_vfio.mutex);
902 	free(dma_map);
903 	return 0;
904 }
905 #endif
906 
907 static uint64_t
908 vtophys_get_paddr_memseg(uint64_t vaddr)
909 {
910 	uintptr_t paddr;
911 	struct rte_memseg *seg;
912 
913 	seg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
914 	if (seg != NULL) {
915 		paddr = seg->iova;
916 		if (paddr == RTE_BAD_IOVA) {
917 			return SPDK_VTOPHYS_ERROR;
918 		}
919 		paddr += (vaddr - (uintptr_t)seg->addr);
920 		return paddr;
921 	}
922 
923 	return SPDK_VTOPHYS_ERROR;
924 }
925 
926 /* Try to get the paddr from /proc/self/pagemap */
927 static uint64_t
928 vtophys_get_paddr_pagemap(uint64_t vaddr)
929 {
930 	uintptr_t paddr;
931 
932 	/* Silence static analyzers */
933 	assert(vaddr != 0);
934 	paddr = rte_mem_virt2iova((void *)vaddr);
935 	if (paddr == RTE_BAD_IOVA) {
936 		/*
937 		 * The vaddr may be valid but doesn't have a backing page
938 		 * assigned yet.  Touch the page to ensure a backing page
939 		 * gets assigned, then try to translate again.
940 		 */
941 		rte_atomic64_read((rte_atomic64_t *)vaddr);
942 		paddr = rte_mem_virt2iova((void *)vaddr);
943 	}
944 	if (paddr == RTE_BAD_IOVA) {
945 		/* Unable to get to the physical address. */
946 		return SPDK_VTOPHYS_ERROR;
947 	}
948 
949 	return paddr;
950 }
951 
952 /* Try to get the paddr from pci devices */
953 static uint64_t
954 vtophys_get_paddr_pci(uint64_t vaddr)
955 {
956 	struct spdk_vtophys_pci_device *vtophys_dev;
957 	uintptr_t paddr;
958 	struct rte_pci_device	*dev;
959 	struct rte_mem_resource *res;
960 	unsigned r;
961 
962 	pthread_mutex_lock(&g_vtophys_pci_devices_mutex);
963 	TAILQ_FOREACH(vtophys_dev, &g_vtophys_pci_devices, tailq) {
964 		dev = vtophys_dev->pci_device;
965 
966 		for (r = 0; r < PCI_MAX_RESOURCE; r++) {
967 			res = &dev->mem_resource[r];
968 			if (res->phys_addr && vaddr >= (uint64_t)res->addr &&
969 			    vaddr < (uint64_t)res->addr + res->len) {
970 				paddr = res->phys_addr + (vaddr - (uint64_t)res->addr);
971 				DEBUG_PRINT("%s: %p -> %p\n", __func__, (void *)vaddr,
972 					    (void *)paddr);
973 				pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
974 				return paddr;
975 			}
976 		}
977 	}
978 	pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
979 
980 	return  SPDK_VTOPHYS_ERROR;
981 }
982 
983 static int
984 vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
985 	       enum spdk_mem_map_notify_action action,
986 	       void *vaddr, size_t len)
987 {
988 	int rc = 0, pci_phys = 0;
989 	uint64_t paddr;
990 
991 	if ((uintptr_t)vaddr & ~MASK_256TB) {
992 		DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
993 		return -EINVAL;
994 	}
995 
996 	if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
997 		DEBUG_PRINT("invalid parameters, vaddr=%p len=%ju\n",
998 			    vaddr, len);
999 		return -EINVAL;
1000 	}
1001 
1002 	/* Get the physical address from the DPDK memsegs */
1003 	paddr = vtophys_get_paddr_memseg((uint64_t)vaddr);
1004 
1005 	switch (action) {
1006 	case SPDK_MEM_MAP_NOTIFY_REGISTER:
1007 		if (paddr == SPDK_VTOPHYS_ERROR) {
1008 			/* This is not an address that DPDK is managing. */
1009 #if VFIO_ENABLED
1010 			enum rte_iova_mode iova_mode;
1011 
1012 			iova_mode = rte_eal_iova_mode();
1013 
1014 			if (spdk_iommu_is_enabled() && iova_mode == RTE_IOVA_VA) {
1015 				/* We'll use the virtual address as the iova to match DPDK. */
1016 				paddr = (uint64_t)vaddr;
1017 				rc = vtophys_iommu_map_dma((uint64_t)vaddr, paddr, len);
1018 				if (rc) {
1019 					return -EFAULT;
1020 				}
1021 				while (len > 0) {
1022 					rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, VALUE_2MB, paddr);
1023 					if (rc != 0) {
1024 						return rc;
1025 					}
1026 					vaddr += VALUE_2MB;
1027 					paddr += VALUE_2MB;
1028 					len -= VALUE_2MB;
1029 				}
1030 			} else
1031 #endif
1032 			{
1033 				/* Get the physical address from /proc/self/pagemap. */
1034 				paddr = vtophys_get_paddr_pagemap((uint64_t)vaddr);
1035 				if (paddr == SPDK_VTOPHYS_ERROR) {
1036 					/* Get the physical address from PCI devices */
1037 					paddr = vtophys_get_paddr_pci((uint64_t)vaddr);
1038 					if (paddr == SPDK_VTOPHYS_ERROR) {
1039 						DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1040 						return -EFAULT;
1041 					}
1042 					/* The beginning of this address range points to a PCI resource,
1043 					 * so the rest must point to a PCI resource as well.
1044 					 */
1045 					pci_phys = 1;
1046 				}
1047 
1048 				/* Get paddr for each 2MB chunk in this address range */
1049 				while (len > 0) {
1050 					/* Look up the paddr for this chunk the same way the first chunk was resolved. */
1051 					if (pci_phys) {
1052 						paddr = vtophys_get_paddr_pci((uint64_t)vaddr);
1053 					} else {
1054 						paddr = vtophys_get_paddr_pagemap((uint64_t)vaddr);
1055 					}
1056 
1057 					if (paddr == SPDK_VTOPHYS_ERROR) {
1058 						DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1059 						return -EFAULT;
1060 					}
1061 
1062 					/* A PCI paddr may not be 2MB aligned, so skip this check for PCI memory. */
1063 					if (!pci_phys && (paddr & MASK_2MB)) {
1064 						DEBUG_PRINT("invalid paddr 0x%" PRIx64 " - must be 2MB aligned\n", paddr);
1065 						return -EINVAL;
1066 					}
1067 #if VFIO_ENABLED
1068 					/* If the IOMMU is on, but DPDK is using iova-mode=pa, we want to register this memory
1069 					 * with the IOMMU using the physical address to match. */
1070 					if (spdk_iommu_is_enabled()) {
1071 						rc = vtophys_iommu_map_dma((uint64_t)vaddr, paddr, VALUE_2MB);
1072 						if (rc) {
1073 							DEBUG_PRINT("Unable to assign vaddr %p to paddr 0x%" PRIx64 "\n", vaddr, paddr);
1074 							return -EFAULT;
1075 						}
1076 					}
1077 #endif
1078 
1079 					rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, VALUE_2MB, paddr);
1080 					if (rc != 0) {
1081 						return rc;
1082 					}
1083 
1084 					vaddr += VALUE_2MB;
1085 					len -= VALUE_2MB;
1086 				}
1087 			}
1088 		} else {
1089 			/* This is an address managed by DPDK. Just setup the translations. */
1090 			while (len > 0) {
1091 				paddr = vtophys_get_paddr_memseg((uint64_t)vaddr);
1092 				if (paddr == SPDK_VTOPHYS_ERROR) {
1093 					DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1094 					return -EFAULT;
1095 				}
1096 
1097 				rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, VALUE_2MB, paddr);
1098 				if (rc != 0) {
1099 					return rc;
1100 				}
1101 
1102 				vaddr += VALUE_2MB;
1103 				len -= VALUE_2MB;
1104 			}
1105 		}
1106 
1107 		break;
1108 	case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
1109 #if VFIO_ENABLED
1110 		if (paddr == SPDK_VTOPHYS_ERROR) {
1111 			/*
1112 			 * This is not an address that DPDK is managing. If vfio is enabled,
1113 			 * we need to unmap the range from the IOMMU
1114 			 */
1115 			if (spdk_iommu_is_enabled()) {
1116 				uint64_t buffer_len = len;
1117 				uint8_t *va = vaddr;
1118 				enum rte_iova_mode iova_mode;
1119 
1120 				iova_mode = rte_eal_iova_mode();
1121 				/*
1122 				 * In virtual address mode, the region is contiguous and can be done in
1123 				 * one unmap.
1124 				 */
1125 				if (iova_mode == RTE_IOVA_VA) {
1126 					paddr = spdk_mem_map_translate(map, (uint64_t)va, &buffer_len);
1127 					if (buffer_len != len || paddr != (uintptr_t)va) {
1128 						DEBUG_PRINT("Unmapping %p with length %lu failed because "
1129 							    "translation had address 0x%" PRIx64 " and length %lu\n",
1130 							    va, len, paddr, buffer_len);
1131 						return -EINVAL;
1132 					}
1133 					rc = vtophys_iommu_unmap_dma(paddr, len);
1134 					if (rc) {
1135 						DEBUG_PRINT("Failed to iommu unmap paddr 0x%" PRIx64 "\n", paddr);
1136 						return -EFAULT;
1137 					}
1138 				} else if (iova_mode == RTE_IOVA_PA) {
1139 					/* Get paddr for each 2MB chunk in this address range */
1140 					while (buffer_len > 0) {
1141 						paddr = spdk_mem_map_translate(map, (uint64_t)va, NULL);
1142 
1143 						if (paddr == SPDK_VTOPHYS_ERROR || buffer_len < VALUE_2MB) {
1144 							DEBUG_PRINT("could not get phys addr for %p\n", va);
1145 							return -EFAULT;
1146 						}
1147 
1148 						rc = vtophys_iommu_unmap_dma(paddr, VALUE_2MB);
1149 						if (rc) {
1150 							DEBUG_PRINT("Failed to iommu unmap paddr 0x%" PRIx64 "\n", paddr);
1151 							return -EFAULT;
1152 						}
1153 
1154 						va += VALUE_2MB;
1155 						buffer_len -= VALUE_2MB;
1156 					}
1157 				}
1158 			}
1159 		}
1160 #endif
1161 		while (len > 0) {
1162 			rc = spdk_mem_map_clear_translation(map, (uint64_t)vaddr, VALUE_2MB);
1163 			if (rc != 0) {
1164 				return rc;
1165 			}
1166 
1167 			vaddr += VALUE_2MB;
1168 			len -= VALUE_2MB;
1169 		}
1170 
1171 		break;
1172 	default:
1173 		SPDK_UNREACHABLE();
1174 	}
1175 
1176 	return rc;
1177 }
1178 
1179 static int
1180 vtophys_check_contiguous_entries(uint64_t paddr1, uint64_t paddr2)
1181 {
1182 	/* This function is always called with paddrs for two subsequent
1183 	 * 2MB chunks in virtual address space, so those chunks are physically
1184 	 * contiguous only if their physical addresses are exactly 2MB apart
1185 	 * from each other as well.
1186 	 */
1187 	return (paddr2 - paddr1 == VALUE_2MB);
1188 }
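/* Worked example (hypothetical addresses): paddr1 = 0x100000000 and
 * paddr2 = 0x100200000 are treated as contiguous (exactly VALUE_2MB apart),
 * while paddr2 = 0x100400000 is not, even though both pairs describe
 * virtually adjacent 2MB chunks.
 */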
1189 
1190 #if VFIO_ENABLED
1191 
1192 static bool
1193 vfio_enabled(void)
1194 {
1195 	return rte_vfio_is_enabled("vfio_pci");
1196 }
1197 
1198 /* Check if IOMMU is enabled on the system */
1199 static bool
1200 has_iommu_groups(void)
1201 {
1202 	int count = 0;
1203 	DIR *dir = opendir("/sys/kernel/iommu_groups");
1204 
1205 	if (dir == NULL) {
1206 		return false;
1207 	}
1208 
1209 	while (count < 3 && readdir(dir) != NULL) {
1210 		count++;
1211 	}
1212 
1213 	closedir(dir);
1214 	/* there will always be ./ and ../ entries */
1215 	return count > 2;
1216 }
1217 
1218 static bool
1219 vfio_noiommu_enabled(void)
1220 {
1221 	return rte_vfio_noiommu_is_enabled();
1222 }
1223 
1224 static void
1225 vtophys_iommu_init(void)
1226 {
1227 	char proc_fd_path[PATH_MAX + 1];
1228 	char link_path[PATH_MAX + 1];
1229 	const char vfio_path[] = "/dev/vfio/vfio";
1230 	DIR *dir;
1231 	struct dirent *d;
1232 
1233 	if (!vfio_enabled()) {
1234 		return;
1235 	}
1236 
1237 	if (vfio_noiommu_enabled()) {
1238 		g_vfio.noiommu_enabled = true;
1239 	} else if (!has_iommu_groups()) {
1240 		return;
1241 	}
1242 
1243 	dir = opendir("/proc/self/fd");
1244 	if (!dir) {
1245 		DEBUG_PRINT("Failed to open /proc/self/fd (%d)\n", errno);
1246 		return;
1247 	}
1248 
1249 	while ((d = readdir(dir)) != NULL) {
1250 		if (d->d_type != DT_LNK) {
1251 			continue;
1252 		}
1253 
1254 		snprintf(proc_fd_path, sizeof(proc_fd_path), "/proc/self/fd/%s", d->d_name);
1255 		if (readlink(proc_fd_path, link_path, sizeof(link_path)) != (sizeof(vfio_path) - 1)) {
1256 			continue;
1257 		}
1258 
1259 		if (memcmp(link_path, vfio_path, sizeof(vfio_path) - 1) == 0) {
1260 			sscanf(d->d_name, "%d", &g_vfio.fd);
1261 			break;
1262 		}
1263 	}
1264 
1265 	closedir(dir);
1266 
1267 	if (g_vfio.fd < 0) {
1268 		DEBUG_PRINT("Failed to discover DPDK VFIO container fd.\n");
1269 		return;
1270 	}
1271 
1272 	g_vfio.enabled = true;
1273 
1274 	return;
1275 }
1276 #endif
1277 
1278 void
1279 vtophys_pci_device_added(struct rte_pci_device *pci_device)
1280 {
1281 	struct spdk_vtophys_pci_device *vtophys_dev;
1282 
1283 	pthread_mutex_lock(&g_vtophys_pci_devices_mutex);
1284 
1285 	vtophys_dev = calloc(1, sizeof(*vtophys_dev));
1286 	if (vtophys_dev) {
1287 		vtophys_dev->pci_device = pci_device;
1288 		TAILQ_INSERT_TAIL(&g_vtophys_pci_devices, vtophys_dev, tailq);
1289 	} else {
1290 		DEBUG_PRINT("Memory allocation error\n");
1291 	}
1292 	pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
1293 
1294 #if VFIO_ENABLED
1295 	struct spdk_vfio_dma_map *dma_map;
1296 	int ret;
1297 
1298 	if (!g_vfio.enabled) {
1299 		return;
1300 	}
1301 
1302 	pthread_mutex_lock(&g_vfio.mutex);
1303 	g_vfio.device_ref++;
1304 	if (g_vfio.device_ref > 1) {
1305 		pthread_mutex_unlock(&g_vfio.mutex);
1306 		return;
1307 	}
1308 
1309 	/* This is the first SPDK device using DPDK vfio. This means that the first
1310 	 * IOMMU group might have just been added to the DPDK vfio container.
1311 	 * From this point on, the memory can be mapped.
1312 	 */
1313 	TAILQ_FOREACH(dma_map, &g_vfio.maps, tailq) {
1314 		ret = ioctl(g_vfio.fd, VFIO_IOMMU_MAP_DMA, &dma_map->map);
1315 		if (ret) {
1316 			DEBUG_PRINT("Cannot update DMA mapping, error %d\n", errno);
1317 			break;
1318 		}
1319 	}
1320 	pthread_mutex_unlock(&g_vfio.mutex);
1321 #endif
1322 }
1323 
1324 void
1325 vtophys_pci_device_removed(struct rte_pci_device *pci_device)
1326 {
1327 	struct spdk_vtophys_pci_device *vtophys_dev;
1328 
1329 	pthread_mutex_lock(&g_vtophys_pci_devices_mutex);
1330 	TAILQ_FOREACH(vtophys_dev, &g_vtophys_pci_devices, tailq) {
1331 		if (vtophys_dev->pci_device == pci_device) {
1332 			TAILQ_REMOVE(&g_vtophys_pci_devices, vtophys_dev, tailq);
1333 			free(vtophys_dev);
1334 			break;
1335 		}
1336 	}
1337 	pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
1338 
1339 #if VFIO_ENABLED
1340 	struct spdk_vfio_dma_map *dma_map;
1341 	int ret;
1342 
1343 	if (!g_vfio.enabled) {
1344 		return;
1345 	}
1346 
1347 	pthread_mutex_lock(&g_vfio.mutex);
1348 	assert(g_vfio.device_ref > 0);
1349 	g_vfio.device_ref--;
1350 	if (g_vfio.device_ref > 0) {
1351 		pthread_mutex_unlock(&g_vfio.mutex);
1352 		return;
1353 	}
1354 
1355 	/* This is the last SPDK device using DPDK vfio. If DPDK doesn't have
1356 	 * any additional devices using its vfio container, all the mappings
1357 	 * will be automatically removed by the Linux vfio driver. We unmap
1358 	 * the memory manually to be able to easily re-map it later regardless
1359 	 * of other external factors.
1360 	 */
1361 	TAILQ_FOREACH(dma_map, &g_vfio.maps, tailq) {
1362 		struct vfio_iommu_type1_dma_unmap unmap = {};
1363 		unmap.argsz = sizeof(unmap);
1364 		unmap.flags = 0;
1365 		unmap.iova = dma_map->map.iova;
1366 		unmap.size = dma_map->map.size;
1367 		ret = ioctl(g_vfio.fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
1368 		if (ret) {
1369 			DEBUG_PRINT("Cannot unmap DMA memory, error %d\n", errno);
1370 			break;
1371 		}
1372 	}
1373 	pthread_mutex_unlock(&g_vfio.mutex);
1374 #endif
1375 }
1376 
1377 int
1378 vtophys_init(void)
1379 {
1380 	const struct spdk_mem_map_ops vtophys_map_ops = {
1381 		.notify_cb = vtophys_notify,
1382 		.are_contiguous = vtophys_check_contiguous_entries,
1383 	};
1384 
1385 	const struct spdk_mem_map_ops phys_ref_map_ops = {
1386 		.notify_cb = NULL,
1387 		.are_contiguous = NULL,
1388 	};
1389 
1390 #if VFIO_ENABLED
1391 	vtophys_iommu_init();
1392 #endif
1393 
1394 	g_phys_ref_map = spdk_mem_map_alloc(0, &phys_ref_map_ops, NULL);
1395 	if (g_phys_ref_map == NULL) {
1396 		DEBUG_PRINT("phys_ref map allocation failed.\n");
1397 		return -ENOMEM;
1398 	}
1399 
1400 	g_vtophys_map = spdk_mem_map_alloc(SPDK_VTOPHYS_ERROR, &vtophys_map_ops, NULL);
1401 	if (g_vtophys_map == NULL) {
1402 		DEBUG_PRINT("vtophys map allocation failed\n");
1403 		return -ENOMEM;
1404 	}
1405 	return 0;
1406 }
1407 
1408 uint64_t
1409 spdk_vtophys(const void *buf, uint64_t *size)
1410 {
1411 	uint64_t vaddr, paddr_2mb;
1412 
1413 	vaddr = (uint64_t)buf;
1414 	paddr_2mb = spdk_mem_map_translate(g_vtophys_map, vaddr, size);
1415 
1416 	/*
1417 	 * SPDK_VTOPHYS_ERROR has all bits set. When the 2MB offset used to be
1418 	 * combined with a bitwise OR, an error translation stayed SPDK_VTOPHYS_ERROR
1419 	 * automatically. Now that the offset is added instead (PCI translations need
1420 	 * not be 2MB aligned), the return value must be checked before the addition.
1421 	 */
1422 	SPDK_STATIC_ASSERT(SPDK_VTOPHYS_ERROR == UINT64_C(-1), "SPDK_VTOPHYS_ERROR should be all 1s");
1423 	if (paddr_2mb == SPDK_VTOPHYS_ERROR) {
1424 		return SPDK_VTOPHYS_ERROR;
1425 	} else {
1426 		return paddr_2mb + (vaddr & MASK_2MB);
1427 	}
1428 }
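/* Illustrative usage sketch (hypothetical DMA buffer): query the physical
 * address (or IOVA) of a registered buffer and how many bytes remain
 * physically contiguous from that point:
 *
 *	uint64_t len = io_size;
 *	uint64_t phys = spdk_vtophys(buf, &len);
 *	if (phys == SPDK_VTOPHYS_ERROR) {
 *		... buf was never registered via spdk_mem_register() ...
 *	} else {
 *		... the first len bytes of buf are contiguous starting at phys ...
 *	}
 */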
1429 
1430 int
1431 spdk_mem_get_fd_and_offset(void *vaddr, uint64_t *offset)
1432 {
1433 	struct rte_memseg *seg;
1434 	int ret, fd;
1435 
1436 	seg = rte_mem_virt2memseg(vaddr, NULL);
1437 	if (!seg) {
1438 		SPDK_ERRLOG("memory %p doesn't exist\n", vaddr);
1439 		return -ENOENT;
1440 	}
1441 
1442 	fd = rte_memseg_get_fd_thread_unsafe(seg);
1443 	if (fd < 0) {
1444 		return fd;
1445 	}
1446 
1447 	ret = rte_memseg_get_fd_offset_thread_unsafe(seg, offset);
1448 	if (ret < 0) {
1449 		return ret;
1450 	}
1451 
1452 	return fd;
1453 }
1454