xref: /dpdk/drivers/bus/pci/linux/pci_vfio.c (revision 3bb3ebb51b789d4ecb417cbdb1dce5c7211f6f18)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <fcntl.h>
#include <linux/pci_regs.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>

#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eal_paging.h>
#include <rte_malloc.h>
#include <rte_vfio.h>
#include <rte_eal.h>
#include <rte_bus.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "eal_filesystem.h"

#include "pci_init.h"
#include "private.h"

/**
 * @file
 * PCI probing under Linux (VFIO version)
 *
 * This code determines whether the PCI device is bound to the VFIO driver,
 * and if so, initializes it (maps BARs, sets up interrupts).
 */

#ifdef VFIO_PRESENT

static struct rte_tailq_elem rte_vfio_tailq = {
	.name = "VFIO_RESOURCE_LIST",
};
EAL_REGISTER_TAILQ(rte_vfio_tailq)

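/*
 * Config space reads and writes go through the VFIO device fd. Each VFIO
 * region lives at a fixed file offset derived from its index (this driver's
 * VFIO_GET_REGION_ADDR() shifts the region index into the upper bits of the
 * offset), so config space access is simply region
 * VFIO_PCI_CONFIG_REGION_INDEX plus the register offset.
 */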
int
pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
		    void *buf, size_t len, off_t offs)
{
	return pread64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}

int
pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
		    const void *buf, size_t len, off_t offs)
{
	return pwrite64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}

/* get PCI BAR number where MSI-X interrupts are */
static int
pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table)
{
	int ret;
	uint32_t reg;
	uint16_t flags;
	uint8_t cap_id, cap_offset;

	/* read PCI capability pointer from config space */
	ret = pread64(fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_CAPABILITY_LIST);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
				"config space!\n");
		return -1;
	}

	/* we only need the first byte */
	cap_offset = reg & 0xFF;

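	/*
	 * Walk the capability list. Per the PCI spec, each capability starts
	 * with a one-byte ID and a one-byte pointer to the next capability;
	 * for MSI-X, the message control word follows at offset 2, and the
	 * dword at offset 4 holds the table offset (upper bits) and the BAR
	 * indicator (lower 3 bits).
	 */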
	while (cap_offset) {

		/* read PCI capability ID */
		ret = pread64(fd, &reg, sizeof(reg),
				VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
				cap_offset);
		if (ret != sizeof(reg)) {
			RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "
					"config space!\n");
			return -1;
		}

		/* we only need the first byte */
		cap_id = reg & 0xFF;

		/* if we haven't reached MSI-X, check the next capability */
		if (cap_id != PCI_CAP_ID_MSIX) {
			ret = pread64(fd, &reg, sizeof(reg),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset);
			if (ret != sizeof(reg)) {
				RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
						"config space!\n");
				return -1;
			}

			/* we only need the second byte */
			cap_offset = (reg & 0xFF00) >> 8;

			continue;
		}
		/* else, read table offset */
		else {
			/* table offset resides in the next 4 bytes */
			ret = pread64(fd, &reg, sizeof(reg),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset + 4);
			if (ret != sizeof(reg)) {
				RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "
						"space!\n");
				return -1;
			}

			ret = pread64(fd, &flags, sizeof(flags),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset + 2);
			if (ret != sizeof(flags)) {
				RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "
						"space!\n");
				return -1;
			}

			msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR;
			msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
			msix_table->size =
				16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));

			return 0;
		}
	}
	return 0;
}

/* enable PCI bus memory space */
static int
pci_vfio_enable_bus_memory(int dev_fd)
{
	uint16_t cmd;
	int ret;

	ret = pread64(dev_fd, &cmd, sizeof(cmd),
		      VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
		      PCI_COMMAND);

	if (ret != sizeof(cmd)) {
		RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
		return -1;
	}

	if (cmd & PCI_COMMAND_MEMORY)
		return 0;

	cmd |= PCI_COMMAND_MEMORY;
	ret = pwrite64(dev_fd, &cmd, sizeof(cmd),
		       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
		       PCI_COMMAND);

	if (ret != sizeof(cmd)) {
		RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
		return -1;
	}

	return 0;
}

/* set PCI bus mastering */
static int
pci_vfio_set_bus_master(int dev_fd, bool op)
{
	uint16_t reg;
	int ret;

	ret = pread64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
		return -1;
	}

	if (op)
		/* set the master bit */
		reg |= PCI_COMMAND_MASTER;
	else
		reg &= ~(PCI_COMMAND_MASTER);

	ret = pwrite64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);

	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
		return -1;
	}

	return 0;
}

/* set up interrupt support (but do not enable interrupts) */
static int
pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
{
	int i, ret, intr_idx;
	enum rte_intr_mode intr_mode;

	/* default to invalid index */
	intr_idx = VFIO_PCI_NUM_IRQS;

	/* Get default / configured intr_mode */
	intr_mode = rte_eal_vfio_intr_mode();

	/* get interrupt type from internal config (MSI-X by default, can be
	 * overridden from the command line)
	 */
	switch (intr_mode) {
	case RTE_INTR_MODE_MSIX:
		intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_MSI:
		intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_LEGACY:
		intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
		break;
	/* don't do anything if we want to automatically determine interrupt type */
	case RTE_INTR_MODE_NONE:
		break;
	default:
		RTE_LOG(ERR, EAL, "  unknown default interrupt type!\n");
		return -1;
	}

	/* start from the MSI-X interrupt type and work down towards INTx */
	for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
		struct vfio_irq_info irq = { .argsz = sizeof(irq) };
		int fd = -1;

		/* skip interrupt modes we don't want */
		if (intr_mode != RTE_INTR_MODE_NONE &&
				i != intr_idx)
			continue;

		irq.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  cannot get IRQ info, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		/* if this vector cannot be used with eventfd, fail if we explicitly
		 * specified an interrupt type, otherwise continue */
		if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
			if (intr_mode != RTE_INTR_MODE_NONE) {
				RTE_LOG(ERR, EAL,
						"  interrupt vector does not support eventfd!\n");
				return -1;
			} else
				continue;
		}

		/* set up an eventfd for interrupts */
		fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "  cannot set up eventfd, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		dev->intr_handle.fd = fd;
		dev->intr_handle.vfio_dev_fd = vfio_dev_fd;

		switch (i) {
		case VFIO_PCI_MSIX_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_MSIX;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
			break;
		case VFIO_PCI_MSI_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_MSI;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
			break;
		case VFIO_PCI_INTX_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_LEGACY;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
			break;
		default:
			RTE_LOG(ERR, EAL, "  unknown interrupt type!\n");
			return -1;
		}

		return 0;
	}

	/* if we're here, we haven't found a suitable interrupt vector */
	return -1;
}

#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
/*
 * Spinlock for device hot-unplug failure handling.
 * Anything that accesses the bus or a device (such as handling SIGBUS on
 * the bus or a memory failure on a device) must take this lock to protect
 * the bus and the device against race conditions.
 */
static rte_spinlock_t failure_handle_lock = RTE_SPINLOCK_INITIALIZER;

static void
pci_vfio_req_handler(void *param)
{
	struct rte_bus *bus;
	int ret;
	struct rte_device *device = (struct rte_device *)param;

	rte_spinlock_lock(&failure_handle_lock);
	bus = rte_bus_find_by_device(device);
	if (bus == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
			device->name);
		goto handle_end;
	}

	/*
	 * The VFIO kernel module requests that user space release allocated
	 * resources before the device is deleted in the kernel, so we can
	 * directly call the bus hot-unplug handler to process it.
	 */
	ret = bus->hot_unplug_handler(device);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Cannot handle hot-unplug for device (%s)\n",
			device->name);
handle_end:
	rte_spinlock_unlock(&failure_handle_lock);
}

/* enable notifier (only enable req now) */
static int
pci_vfio_enable_notifier(struct rte_pci_device *dev, int vfio_dev_fd)
{
	int ret;
	int fd = -1;

	/* set up an eventfd for req notifier */
	fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot set up eventfd, error %i (%s)\n",
			errno, strerror(errno));
		return -1;
	}

	dev->vfio_req_intr_handle.fd = fd;
	dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_VFIO_REQ;
	dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;

	ret = rte_intr_callback_register(&dev->vfio_req_intr_handle,
					 pci_vfio_req_handler,
					 (void *)&dev->device);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to register req notifier handler.\n");
		goto error;
	}

	ret = rte_intr_enable(&dev->vfio_req_intr_handle);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to enable req notifier.\n");
		ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
						 pci_vfio_req_handler,
						 (void *)&dev->device);
		if (ret < 0)
			RTE_LOG(ERR, EAL,
				"Failed to unregister req notifier handler.\n");
		goto error;
	}

	return 0;
error:
	close(fd);

	dev->vfio_req_intr_handle.fd = -1;
	dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	dev->vfio_req_intr_handle.vfio_dev_fd = -1;

	return -1;
}

/* disable notifier (only disable req now) */
static int
pci_vfio_disable_notifier(struct rte_pci_device *dev)
{
	int ret;

	ret = rte_intr_disable(&dev->vfio_req_intr_handle);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to disable req notifier.\n");
		return -1;
	}

	ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
					   pci_vfio_req_handler,
					   (void *)&dev->device);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			 "Failed to unregister req notifier handler.\n");
		return -1;
	}

	close(dev->vfio_req_intr_handle.fd);

	dev->vfio_req_intr_handle.fd = -1;
	dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	dev->vfio_req_intr_handle.vfio_dev_fd = -1;

	return 0;
}
#endif

static int
pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
{
	uint32_t ioport_bar;
	int ret;

	ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
			  VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
			  + PCI_BASE_ADDRESS_0 + bar_index*4);
	if (ret != sizeof(ioport_bar)) {
		RTE_LOG(ERR, EAL, "Cannot read BAR (%x) from config space!\n",
			PCI_BASE_ADDRESS_0 + bar_index*4);
		return -1;
	}

	return (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) != 0;
}

static int
pci_rte_vfio_setup_device(struct rte_pci_device *dev, int vfio_dev_fd)
{
	if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
		RTE_LOG(ERR, EAL, "Error setting up interrupts!\n");
		return -1;
	}

	if (pci_vfio_enable_bus_memory(vfio_dev_fd)) {
		RTE_LOG(ERR, EAL, "Cannot enable bus memory!\n");
		return -1;
	}

	/* set bus mastering for the device */
	if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
		RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
		return -1;
	}

	/*
	 * Reset the device. If the device is not capable of being reset,
	 * the ioctl fails with errno set to EINVAL.
	 */
	if (ioctl(vfio_dev_fd, VFIO_DEVICE_RESET) && errno != EINVAL) {
		RTE_LOG(ERR, EAL, "Unable to reset device! Error: %d (%s)\n",
				errno, strerror(errno));
		return -1;
	}

	return 0;
}

static int
pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
		int bar_index, int additional_flags)
{
	struct memreg {
		uint64_t offset;
		size_t   size;
	} memreg[2] = {};
	void *bar_addr;
	struct pci_msix_table *msix_table = &vfio_res->msix_table;
	struct pci_map *bar = &vfio_res->maps[bar_index];

	if (bar->size == 0) {
		RTE_LOG(DEBUG, EAL, "Bar size is 0, skip BAR%d\n", bar_index);
		return 0;
	}

	if (msix_table->bar_index == bar_index) {
		/*
		 * VFIO will not let us map the MSI-X table,
		 * but we can map around it.
		 */
		uint32_t table_start = msix_table->offset;
		uint32_t table_end = table_start + msix_table->size;
		table_end = RTE_ALIGN(table_end, rte_mem_page_size());
		table_start = RTE_ALIGN_FLOOR(table_start, rte_mem_page_size());

		/* If page-aligned start of MSI-X table is less than the
		 * actual MSI-X table start address, reassign to the actual
		 * start address.
		 */
		if (table_start < msix_table->offset)
			table_start = msix_table->offset;

		if (table_start == 0 && table_end >= bar->size) {
			/* Cannot map this BAR */
			RTE_LOG(DEBUG, EAL, "Skipping BAR%d\n", bar_index);
			bar->size = 0;
			bar->addr = 0;
			return 0;
		}

		memreg[0].offset = bar->offset;
		memreg[0].size = table_start;
		if (bar->size < table_end) {
			/*
			 * If MSI-X table end is beyond BAR end, don't attempt
			 * to perform second mapping.
			 */
			memreg[1].offset = 0;
			memreg[1].size = 0;
		} else {
			memreg[1].offset = bar->offset + table_end;
			memreg[1].size = bar->size - table_end;
		}
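
		/*
		 * Example (assuming 4K pages): for a 16K BAR with a 2K MSI-X
		 * table at offset 0x2000, the table occupies pages
		 * [0x2000, 0x3000), so the two regions mapped around it are
		 * [0x0000, 0x2000) and [0x3000, 0x4000).
		 */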

		RTE_LOG(DEBUG, EAL,
			"Trying to map BAR%d that contains the MSI-X "
			"table. Trying offsets: "
			"0x%04" PRIx64 ":0x%04zx, 0x%04" PRIx64 ":0x%04zx\n",
			bar_index,
			memreg[0].offset, memreg[0].size,
			memreg[1].offset, memreg[1].size);
	} else {
		memreg[0].offset = bar->offset;
		memreg[0].size = bar->size;
	}

	/* reserve the address using an inaccessible mapping */
	bar_addr = mmap(bar->addr, bar->size, 0, MAP_PRIVATE |
			MAP_ANONYMOUS | additional_flags, -1, 0);
	if (bar_addr != MAP_FAILED) {
		void *map_addr = NULL;
		if (memreg[0].size) {
			/* actual map of first part */
			map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
							memreg[0].offset,
							memreg[0].size,
							RTE_MAP_FORCE_ADDRESS);
		}

		/*
		 * Regarding "memreg[0].size == 0":
		 * If this BAR has an MSI-X table, memreg[0].size (the
		 * first part, i.e. the part before the table) can
		 * legitimately be 0 for hardware using vector table
		 * offset 0 (i.e. the first part does not exist).
		 *
		 * When memreg[0].size is 0, "mapping the first part"
		 * never happens, and map_addr is NULL at this
		 * point. So check that the mapping has actually been
		 * attempted.
		 */
		/* if there's a second part, try to map it */
		if ((map_addr != NULL || memreg[0].size == 0)
			&& memreg[1].offset && memreg[1].size) {
			void *second_addr = RTE_PTR_ADD(bar_addr,
						(uintptr_t)(memreg[1].offset -
						bar->offset));
			map_addr = pci_map_resource(second_addr,
							vfio_dev_fd,
							memreg[1].offset,
							memreg[1].size,
							RTE_MAP_FORCE_ADDRESS);
		}

		if (map_addr == NULL) {
			munmap(bar_addr, bar->size);
			bar_addr = MAP_FAILED;
			RTE_LOG(ERR, EAL, "Failed to map pci BAR%d\n",
					bar_index);
			return -1;
		}
	} else {
		RTE_LOG(ERR, EAL,
				"Failed to create inaccessible mapping for BAR%d\n",
				bar_index);
		return -1;
	}

	bar->addr = bar_addr;
	return 0;
}

/*
 * Region info may contain capability headers, so we need to keep reallocating
 * the memory until the allocated memory size matches argsz.
 */
static int
pci_vfio_get_region_info(int vfio_dev_fd, struct vfio_region_info **info,
		int region)
{
	struct vfio_region_info *ri;
	size_t argsz = sizeof(*ri);
	int ret;

	ri = malloc(sizeof(*ri));
	if (ri == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for region info\n");
		return -1;
	}
again:
	memset(ri, 0, argsz);
	ri->argsz = argsz;
	ri->index = region;

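	/* the kernel writes the required size back into argsz if our buffer is too small */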
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, ri);
	if (ret < 0) {
		free(ri);
		return ret;
	}
	if (ri->argsz != argsz) {
		struct vfio_region_info *tmp;

		argsz = ri->argsz;
		tmp = realloc(ri, argsz);

		if (tmp == NULL) {
			/* realloc failed but ri is still there */
			free(ri);
			RTE_LOG(ERR, EAL, "Cannot reallocate memory for region info\n");
			return -1;
		}
		ri = tmp;
		goto again;
	}
	*info = ri;

	return 0;
}

static struct vfio_info_cap_header *
pci_vfio_info_cap(struct vfio_region_info *info, int cap)
{
	struct vfio_info_cap_header *h;
	size_t offset;

	if ((info->flags & RTE_VFIO_INFO_FLAG_CAPS) == 0) {
		/* VFIO info does not advertise capabilities */
		return NULL;
	}

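	/* capability chain offsets are relative to the start of the info struct */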
	offset = VFIO_CAP_OFFSET(info);
	while (offset != 0) {
		h = RTE_PTR_ADD(info, offset);
		if (h->id == cap)
			return h;
		offset = h->next;
	}
	return NULL;
}

static int
pci_vfio_msix_is_mappable(int vfio_dev_fd, int msix_region)
{
	struct vfio_region_info *info;
	int ret;

	ret = pci_vfio_get_region_info(vfio_dev_fd, &info, msix_region);
	if (ret < 0)
		return -1;

	ret = pci_vfio_info_cap(info, RTE_VFIO_CAP_MSIX_MAPPABLE) != NULL;

	/* cleanup */
	free(info);

	return ret;
}


static int
pci_vfio_map_resource_primary(struct rte_pci_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	char pci_addr[PATH_MAX] = {0};
	int vfio_dev_fd;
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);

	struct pci_map *maps;

	dev->intr_handle.fd = -1;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	dev->vfio_req_intr_handle.fd = -1;
#endif

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
					&vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* allocate vfio_res and get region info */
	vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store vfio mmap details\n", __func__);
		goto err_vfio_dev_fd;
	}
	memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));

	/* get number of regions (up to BAR5) */
	vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
			VFIO_PCI_BAR5_REGION_INDEX + 1);

	/* map BARs */
	maps = vfio_res->maps;

	vfio_res->msix_table.bar_index = -1;
	/* get the MSI-X BAR, if any (we have to know where it is because we
	 * can't easily mmap it when using VFIO)
	 */
	ret = pci_vfio_get_msix_bar(vfio_dev_fd, &vfio_res->msix_table);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "  %s cannot get MSI-X BAR number!\n",
				pci_addr);
		goto err_vfio_res;
	}
	/* if we found our MSI-X BAR region, check if we can mmap it */
	if (vfio_res->msix_table.bar_index != -1) {
		int ret = pci_vfio_msix_is_mappable(vfio_dev_fd,
				vfio_res->msix_table.bar_index);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "Couldn't check if MSI-X BAR is mappable\n");
			goto err_vfio_res;
		} else if (ret != 0) {
			/* we can map it, so we don't care where it is */
			RTE_LOG(DEBUG, EAL, "VFIO reports MSI-X BAR as mappable\n");
			vfio_res->msix_table.bar_index = -1;
		}
	}

	for (i = 0; i < vfio_res->nb_maps; i++) {
		struct vfio_region_info *reg = NULL;
		void *bar_addr;

		ret = pci_vfio_get_region_info(vfio_dev_fd, &reg, i);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s cannot get device region info "
				"error %i (%s)\n", pci_addr, errno,
				strerror(errno));
			goto err_vfio_res;
		}

		/* check for an I/O port region */
		ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
		if (ret < 0) {
			free(reg);
			goto err_vfio_res;
		} else if (ret) {
			RTE_LOG(INFO, EAL, "Ignore mapping IO port bar(%d)\n",
					i);
			free(reg);
			continue;
		}

		/* skip non-mmappable BARs */
		if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
			free(reg);
			continue;
		}

		/* try mapping somewhere close to the end of hugepages */
		if (pci_map_addr == NULL)
			pci_map_addr = pci_find_max_end_va();

		bar_addr = pci_map_addr;
		pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg->size);

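		/* keep the hint page-aligned so the next BAR mapping starts on a page boundary */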
		pci_map_addr = RTE_PTR_ALIGN(pci_map_addr,
					sysconf(_SC_PAGE_SIZE));

		maps[i].addr = bar_addr;
		maps[i].offset = reg->offset;
		maps[i].size = reg->size;
		maps[i].path = NULL; /* vfio doesn't have per-resource paths */

		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s mapping BAR%i failed: %s\n",
					pci_addr, i, strerror(errno));
			free(reg);
			goto err_vfio_res;
		}

		dev->mem_resource[i].addr = maps[i].addr;

		free(reg);
	}

	if (pci_rte_vfio_setup_device(dev, vfio_dev_fd) < 0) {
		RTE_LOG(ERR, EAL, "  %s setup device failed\n", pci_addr);
		goto err_vfio_res;
	}

#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	if (pci_vfio_enable_notifier(dev, vfio_dev_fd) != 0) {
		RTE_LOG(ERR, EAL, "Error setting up notifier!\n");
		goto err_vfio_res;
	}

#endif
	TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);

	return 0;
err_vfio_res:
	rte_free(vfio_res);
err_vfio_dev_fd:
	rte_vfio_release_device(rte_pci_get_sysfs_path(),
			pci_addr, vfio_dev_fd);
	return -1;
}

static int
pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	char pci_addr[PATH_MAX] = {0};
	int vfio_dev_fd;
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);

	struct pci_map *maps;

	dev->intr_handle.fd = -1;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	dev->vfio_req_intr_handle.fd = -1;
#endif

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	/* if we're in a secondary process, just find our tailq entry */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (rte_pci_addr_cmp(&vfio_res->pci_addr,
						 &dev->addr))
			continue;
		break;
	}
	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
					&vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* map BARs */
	maps = vfio_res->maps;

	for (i = 0; i < vfio_res->nb_maps; i++) {
		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s mapping BAR%i failed: %s\n",
					pci_addr, i, strerror(errno));
			goto err_vfio_dev_fd;
		}

		dev->mem_resource[i].addr = maps[i].addr;
	}

	/* we need to save vfio_dev_fd so that it can be used during release */
	dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;
#endif

	return 0;
err_vfio_dev_fd:
	rte_vfio_release_device(rte_pci_get_sysfs_path(),
			pci_addr, vfio_dev_fd);
	return -1;
}

/*
 * map the PCI resources of a PCI device in virtual memory (VFIO version).
 * primary and secondary processes follow almost exactly the same path
 */
int
pci_vfio_map_resource(struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return pci_vfio_map_resource_primary(dev);
	else
		return pci_vfio_map_resource_secondary(dev);
}

static struct mapped_pci_resource *
find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
			struct rte_pci_device *dev,
			const char *pci_addr)
{
	struct mapped_pci_resource *vfio_res = NULL;
	struct pci_map *maps;
	int i;

	/* Get vfio_res */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr))
			continue;
		break;
	}

	if (vfio_res == NULL)
		return vfio_res;

	RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
		pci_addr);

	maps = vfio_res->maps;
	for (i = 0; i < vfio_res->nb_maps; i++) {

		/*
		 * Unlike when mapping, we do not need to be aware of MSI-X
		 * table BAR mappings here; using the current maps array is
		 * enough.
		 */
		if (maps[i].addr) {
			RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
				pci_addr, maps[i].addr);
			pci_unmap_resource(maps[i].addr, maps[i].size);
		}
	}

	return vfio_res;
}

static int
pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
{
	char pci_addr[PATH_MAX] = {0};
	struct rte_pci_addr *loc = &dev->addr;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list;
	int ret;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	ret = pci_vfio_disable_notifier(dev);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to disable req notifier.\n");
		return -1;
	}

#endif
	if (close(dev->intr_handle.fd) < 0) {
		RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
			pci_addr);
		return -1;
	}

	if (pci_vfio_set_bus_master(dev->intr_handle.vfio_dev_fd, false)) {
		RTE_LOG(ERR, EAL, "  %s cannot unset bus mastering for PCI device!\n",
				pci_addr);
		return -1;
	}

	ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
				  dev->intr_handle.vfio_dev_fd);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot release device\n", __func__);
		return ret;
	}

	vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);

	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	TAILQ_REMOVE(vfio_res_list, vfio_res, next);
	rte_free(vfio_res);
	return 0;
}

static int
pci_vfio_unmap_resource_secondary(struct rte_pci_device *dev)
{
	char pci_addr[PATH_MAX] = {0};
	struct rte_pci_addr *loc = &dev->addr;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list;
	int ret;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
				  dev->intr_handle.vfio_dev_fd);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot release device\n", __func__);
		return ret;
	}

	vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);

	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	return 0;
}

int
pci_vfio_unmap_resource(struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return pci_vfio_unmap_resource_primary(dev);
	else
		return pci_vfio_unmap_resource_secondary(dev);
}

int
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
		    struct rte_pci_ioport *p)
{
	if (bar < VFIO_PCI_BAR0_REGION_INDEX ||
	    bar > VFIO_PCI_BAR5_REGION_INDEX) {
		RTE_LOG(ERR, EAL, "invalid bar (%d)!\n", bar);
		return -1;
	}

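	/*
	 * No mapping is set up here: ioport reads and writes are serviced by
	 * pread64()/pwrite64() on the device fd at this region's fixed offset.
	 */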
	p->dev = dev;
	p->base = VFIO_GET_REGION_ADDR(bar);
	return 0;
}

void
pci_vfio_ioport_read(struct rte_pci_ioport *p,
		     void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pread64(intr_handle->vfio_dev_fd, data,
		    len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}

void
pci_vfio_ioport_write(struct rte_pci_ioport *p,
		      const void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pwrite64(intr_handle->vfio_dev_fd, data,
		     len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}

int
pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
{
	RTE_SET_USED(p);
	return -1;
}

int
pci_vfio_is_enabled(void)
{
	return rte_vfio_is_enabled("vfio_pci");
}
#endif