/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <fcntl.h>
#include <linux/pci_regs.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>

#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eal_paging.h>
#include <rte_malloc.h>
#include <rte_vfio.h>
#include <rte_eal.h>
#include <rte_bus.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "eal_filesystem.h"

#include "pci_init.h"
#include "private.h"

/**
 * @file
 * PCI probing under Linux (VFIO version)
 *
 * This code tries to determine whether the PCI device is bound to the VFIO
 * driver, and if so, initializes it (maps BARs, sets up interrupts).
 *
 * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
 */

#ifdef VFIO_PRESENT

#ifndef PAGE_SIZE
#define PAGE_SIZE   (sysconf(_SC_PAGESIZE))
#endif
#define PAGE_MASK   (~(PAGE_SIZE - 1))

static struct rte_tailq_elem rte_vfio_tailq = {
	.name = "VFIO_RESOURCE_LIST",
};
EAL_REGISTER_TAILQ(rte_vfio_tailq)

int
pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
		    void *buf, size_t len, off_t offs)
{
	return pread64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}

int
pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
		    const void *buf, size_t len, off_t offs)
{
	return pwrite64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}
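
/*
 * Usage sketch (illustrative, not part of the original source): these
 * helpers access config space through the VFIO config region, so a
 * hypothetical caller could read the 16-bit vendor ID like this:
 *
 *	uint16_t vendor;
 *
 *	if (pci_vfio_read_config(&dev->intr_handle, &vendor,
 *			sizeof(vendor), PCI_VENDOR_ID) != sizeof(vendor))
 *		return -1;
 */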

/* get the PCI BAR number where MSI-X interrupts are located */
static int
pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table)
{
	int ret;
	uint32_t reg;
	uint16_t flags;
	uint8_t cap_id, cap_offset;

	/* read PCI capability pointer from config space */
	ret = pread64(fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_CAPABILITY_LIST);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
				"config space!\n");
		return -1;
	}

	/* we only need the first byte */
	cap_offset = reg & 0xFF;

	while (cap_offset) {

		/* read PCI capability ID */
		ret = pread64(fd, &reg, sizeof(reg),
				VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
				cap_offset);
		if (ret != sizeof(reg)) {
			RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "
					"config space!\n");
			return -1;
		}

		/* we only need the first byte */
		cap_id = reg & 0xFF;

		/* if we haven't reached MSI-X, check the next capability */
		if (cap_id != PCI_CAP_ID_MSIX) {
			ret = pread64(fd, &reg, sizeof(reg),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset);
			if (ret != sizeof(reg)) {
				RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
						"config space!\n");
				return -1;
			}

			/* we only need the second byte */
			cap_offset = (reg & 0xFF00) >> 8;

			continue;
		}
		/* else, read the table offset */
		else {
			/* table offset resides in the next 4 bytes */
			ret = pread64(fd, &reg, sizeof(reg),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset + 4);
			if (ret != sizeof(reg)) {
				RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "
						"space!\n");
				return -1;
			}

			ret = pread64(fd, &flags, sizeof(flags),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset + 2);
			if (ret != sizeof(flags)) {
				RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "
						"space!\n");
				return -1;
			}

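			/*
			 * Decode the capability (note added for clarity):
			 * the dword at offset 4 holds the table BIR (BAR
			 * index) in its low bits and the table offset in
			 * the remaining bits; the message control word
			 * holds the table size encoded as (number of
			 * entries - 1), with 16 bytes per entry. E.g. a
			 * QSIZE field of 31 means 32 entries, i.e. a
			 * 512-byte table.
			 */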
			msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR;
			msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
			msix_table->size =
				16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));

			return 0;
		}
	}
	return 0;
}

/* enable PCI bus memory space */
static int
pci_vfio_enable_bus_memory(int dev_fd)
{
	uint16_t cmd;
	int ret;

	ret = pread64(dev_fd, &cmd, sizeof(cmd),
		      VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
		      PCI_COMMAND);

	if (ret != sizeof(cmd)) {
		RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
		return -1;
	}

	if (cmd & PCI_COMMAND_MEMORY)
		return 0;

	cmd |= PCI_COMMAND_MEMORY;
	ret = pwrite64(dev_fd, &cmd, sizeof(cmd),
		       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
		       PCI_COMMAND);

	if (ret != sizeof(cmd)) {
		RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
		return -1;
	}

	return 0;
}
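
/*
 * Note (added for clarity): the memory space enable bit in the PCI command
 * register must be set for the device to respond to MMIO accesses through
 * its BARs; depending on how the device was reset or bound, that bit may
 * be left cleared, so it is enabled above before the BARs are used.
 */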

/* set PCI bus mastering */
static int
pci_vfio_set_bus_master(int dev_fd, bool op)
{
	uint16_t reg;
	int ret;

	ret = pread64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
		return -1;
	}

	if (op)
		/* set the master bit */
		reg |= PCI_COMMAND_MASTER;
	else
		reg &= ~(PCI_COMMAND_MASTER);

	ret = pwrite64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);

	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
		return -1;
	}

	return 0;
}

/* set up interrupt support (but do not enable interrupts) */
static int
pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
{
	int i, ret, intr_idx;
	enum rte_intr_mode intr_mode;

	/* default to invalid index */
	intr_idx = VFIO_PCI_NUM_IRQS;

	/* get default / configured intr_mode */
	intr_mode = rte_eal_vfio_intr_mode();

	/* get interrupt type from internal config (MSI-X by default, can be
	 * overridden from the command line)
	 */
	switch (intr_mode) {
	case RTE_INTR_MODE_MSIX:
		intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_MSI:
		intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_LEGACY:
		intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
		break;
	/* don't do anything if we want to automatically determine interrupt type */
	case RTE_INTR_MODE_NONE:
		break;
	default:
		RTE_LOG(ERR, EAL, "  unknown default interrupt type!\n");
		return -1;
	}

	/* start from MSI-X and work down through MSI to legacy INTx */
	for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
		struct vfio_irq_info irq = { .argsz = sizeof(irq) };
		int fd = -1;

		/* skip interrupt modes we don't want */
		if (intr_mode != RTE_INTR_MODE_NONE &&
				i != intr_idx)
			continue;

		irq.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  cannot get IRQ info, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		/* if this vector cannot be used with eventfd, fail if we
		 * explicitly specified an interrupt type, otherwise continue */
		if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
			if (intr_mode != RTE_INTR_MODE_NONE) {
				RTE_LOG(ERR, EAL,
						"  interrupt vector does not support eventfd!\n");
				return -1;
			} else
				continue;
		}

		/* set up an eventfd for interrupts */
		fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "  cannot set up eventfd, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		dev->intr_handle.fd = fd;
		dev->intr_handle.vfio_dev_fd = vfio_dev_fd;

		switch (i) {
		case VFIO_PCI_MSIX_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_MSIX;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
			break;
		case VFIO_PCI_MSI_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_MSI;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
			break;
		case VFIO_PCI_INTX_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_LEGACY;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
			break;
		default:
			RTE_LOG(ERR, EAL, "  unknown interrupt type!\n");
			return -1;
		}

		return 0;
	}

	/* if we're here, we haven't found a suitable interrupt vector */
	return -1;
}

#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
/*
 * Spinlock for device hot-unplug failure handling.
 * Anything that accesses the bus or a device in the failure path, such as
 * handling a SIGBUS on the bus or a memory failure on the device, must take
 * this lock; it protects the bus and the device from race conditions.
 */
static rte_spinlock_t failure_handle_lock = RTE_SPINLOCK_INITIALIZER;

static void
pci_vfio_req_handler(void *param)
{
	struct rte_bus *bus;
	int ret;
	struct rte_device *device = (struct rte_device *)param;

	rte_spinlock_lock(&failure_handle_lock);
	bus = rte_bus_find_by_device(device);
	if (bus == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
			device->name);
		goto handle_end;
	}

	/*
	 * The VFIO kernel module requests that user space release allocated
	 * resources before the device is deleted in the kernel, so we can
	 * directly call the bus hot-unplug handler to process it.
	 */
	ret = bus->hot_unplug_handler(device);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Cannot handle hot-unplug for device (%s)\n",
			device->name);
handle_end:
	rte_spinlock_unlock(&failure_handle_lock);
}

/* enable notifier (only the req notifier for now) */
static int
pci_vfio_enable_notifier(struct rte_pci_device *dev, int vfio_dev_fd)
{
	int ret;
	int fd = -1;

	/* set up an eventfd for the req notifier */
	fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot set up eventfd, error %i (%s)\n",
			errno, strerror(errno));
		return -1;
	}

	dev->vfio_req_intr_handle.fd = fd;
	dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_VFIO_REQ;
	dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;

	ret = rte_intr_callback_register(&dev->vfio_req_intr_handle,
					 pci_vfio_req_handler,
					 (void *)&dev->device);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to register req notifier handler.\n");
		goto error;
	}

	ret = rte_intr_enable(&dev->vfio_req_intr_handle);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to enable req notifier.\n");
		ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
						 pci_vfio_req_handler,
						 (void *)&dev->device);
		if (ret < 0)
			RTE_LOG(ERR, EAL,
				"Failed to unregister req notifier handler.\n");
		goto error;
	}

	return 0;
error:
	close(fd);

	dev->vfio_req_intr_handle.fd = -1;
	dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	dev->vfio_req_intr_handle.vfio_dev_fd = -1;

	return -1;
}

/* disable notifier (only the req notifier for now) */
static int
pci_vfio_disable_notifier(struct rte_pci_device *dev)
{
	int ret;

	ret = rte_intr_disable(&dev->vfio_req_intr_handle);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to disable req notifier.\n");
		return -1;
	}

	ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
					   pci_vfio_req_handler,
					   (void *)&dev->device);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			"Failed to unregister req notifier handler.\n");
		return -1;
	}

	close(dev->vfio_req_intr_handle.fd);

	dev->vfio_req_intr_handle.fd = -1;
	dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	dev->vfio_req_intr_handle.vfio_dev_fd = -1;

	return 0;
}
#endif

static int
pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
{
	uint32_t ioport_bar;
	int ret;

	ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
			  VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
			  + PCI_BASE_ADDRESS_0 + bar_index * 4);
	if (ret != sizeof(ioport_bar)) {
		RTE_LOG(ERR, EAL, "Cannot read BAR (%x) from PCI config space!\n",
			PCI_BASE_ADDRESS_0 + bar_index * 4);
		return -1;
	}

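	/*
	 * Note (added for clarity): bit 0 of a BAR register is the space
	 * indicator; it reads as 1 for an I/O port BAR and 0 for a memory
	 * BAR, which is what PCI_BASE_ADDRESS_SPACE_IO tests below.
	 */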
	return (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) != 0;
}

static int
pci_rte_vfio_setup_device(struct rte_pci_device *dev, int vfio_dev_fd)
{
	if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
		RTE_LOG(ERR, EAL, "Error setting up interrupts!\n");
		return -1;
	}

	if (pci_vfio_enable_bus_memory(vfio_dev_fd)) {
		RTE_LOG(ERR, EAL, "Cannot enable bus memory!\n");
		return -1;
	}

	/* set bus mastering for the device */
	if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
		RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
		return -1;
	}

	/*
	 * Reset the device. If the device is not capable of resetting,
	 * the ioctl fails with errno set to EINVAL, which we ignore.
	 */
	if (ioctl(vfio_dev_fd, VFIO_DEVICE_RESET) && errno != EINVAL) {
		RTE_LOG(ERR, EAL, "Unable to reset device! Error: %d (%s)\n",
				errno, strerror(errno));
		return -1;
	}

	return 0;
}

static int
pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
		int bar_index, int additional_flags)
{
	struct memreg {
		uint64_t offset;
		size_t   size;
	} memreg[2] = {};
	void *bar_addr;
	struct pci_msix_table *msix_table = &vfio_res->msix_table;
	struct pci_map *bar = &vfio_res->maps[bar_index];

	if (bar->size == 0) {
		RTE_LOG(DEBUG, EAL, "BAR size is 0, skipping BAR%d\n", bar_index);
		return 0;
	}

	if (msix_table->bar_index == bar_index) {
		/*
		 * VFIO will not let us map the MSI-X table,
		 * but we can map around it.
		 */
		uint32_t table_start = msix_table->offset;
		uint32_t table_end = table_start + msix_table->size;
		table_end = RTE_ALIGN(table_end, PAGE_SIZE);
		table_start = RTE_ALIGN_FLOOR(table_start, PAGE_SIZE);

		/* If the page-aligned start of the MSI-X table is less than
		 * the actual MSI-X table start address, reassign to the
		 * actual start address.
		 */
		if (table_start < msix_table->offset)
			table_start = msix_table->offset;

		if (table_start == 0 && table_end >= bar->size) {
			/* Cannot map this BAR */
			RTE_LOG(DEBUG, EAL, "Skipping BAR%d\n", bar_index);
			bar->size = 0;
			bar->addr = 0;
			return 0;
		}

		memreg[0].offset = bar->offset;
		memreg[0].size = table_start;
		if (bar->size < table_end) {
			/*
			 * If the MSI-X table end is beyond the BAR end, don't
			 * attempt to perform the second mapping.
			 */
			memreg[1].offset = 0;
			memreg[1].size = 0;
		} else {
			memreg[1].offset = bar->offset + table_end;
			memreg[1].size = bar->size - table_end;
		}
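
		/*
		 * Worked example (illustrative numbers, not from the
		 * original source): with a 64KB BAR, a 2KB MSI-X table at
		 * offset 0x2000 and 4KB pages, table_start stays 0x2000,
		 * table_end rounds up to 0x3000, and we end up mapping
		 * [0x0, 0x2000) and [0x3000, 0x10000) of the BAR, around
		 * the hole.
		 */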

		RTE_LOG(DEBUG, EAL,
			"Trying to map BAR%d that contains the MSI-X "
			"table. Trying offsets: "
			"0x%04" PRIx64 ":0x%04zx, 0x%04" PRIx64 ":0x%04zx\n",
			bar_index,
			memreg[0].offset, memreg[0].size,
			memreg[1].offset, memreg[1].size);
	} else {
		memreg[0].offset = bar->offset;
		memreg[0].size = bar->size;
	}

	/* reserve the address using an inaccessible mapping */
	bar_addr = mmap(bar->addr, bar->size, 0, MAP_PRIVATE |
			MAP_ANONYMOUS | additional_flags, -1, 0);
	if (bar_addr != MAP_FAILED) {
		void *map_addr = NULL;
		if (memreg[0].size) {
			/* actual map of first part */
			map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
							memreg[0].offset,
							memreg[0].size,
							RTE_MAP_FORCE_ADDRESS);
		}

		/* if there's a second part, try to map it */
		if (map_addr != NULL
			&& memreg[1].offset && memreg[1].size) {
			void *second_addr = RTE_PTR_ADD(bar_addr,
						(uintptr_t)(memreg[1].offset -
						bar->offset));
			map_addr = pci_map_resource(second_addr,
							vfio_dev_fd,
							memreg[1].offset,
							memreg[1].size,
							RTE_MAP_FORCE_ADDRESS);
		}

		if (map_addr == NULL) {
			munmap(bar_addr, bar->size);
			bar_addr = MAP_FAILED;
			RTE_LOG(ERR, EAL, "Failed to map PCI BAR%d\n",
					bar_index);
			return -1;
		}
	} else {
		RTE_LOG(ERR, EAL,
				"Failed to create inaccessible mapping for BAR%d\n",
				bar_index);
		return -1;
	}

	bar->addr = bar_addr;
	return 0;
}

/*
 * Region info may contain capability headers, so we need to keep
 * reallocating the buffer until the size we allocate matches the argsz
 * reported back by the kernel.
 */
static int
pci_vfio_get_region_info(int vfio_dev_fd, struct vfio_region_info **info,
		int region)
{
	struct vfio_region_info *ri;
	size_t argsz = sizeof(*ri);
	int ret;

	ri = malloc(sizeof(*ri));
	if (ri == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for region info\n");
		return -1;
	}
again:
	memset(ri, 0, argsz);
	ri->argsz = argsz;
	ri->index = region;

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, ri);
	if (ret < 0) {
		free(ri);
		return ret;
	}
	if (ri->argsz != argsz) {
		struct vfio_region_info *tmp;

		argsz = ri->argsz;
		tmp = realloc(ri, argsz);

		if (tmp == NULL) {
			/* realloc failed, but the original ri is still valid */
			free(ri);
			RTE_LOG(ERR, EAL, "Cannot reallocate memory for region info\n");
			return -1;
		}
		ri = tmp;
		goto again;
	}
	*info = ri;

	return 0;
}
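
/*
 * Usage sketch (illustrative, not part of the original source): the
 * retry-on-argsz loop above is the standard VFIO pattern for ioctls with
 * variable-size results, so a hypothetical caller looks like:
 *
 *	struct vfio_region_info *info = NULL;
 *
 *	if (pci_vfio_get_region_info(vfio_dev_fd, &info,
 *			VFIO_PCI_BAR0_REGION_INDEX) == 0) {
 *		... use info->size, info->offset, info->flags ...
 *		free(info);
 *	}
 */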

static struct vfio_info_cap_header *
pci_vfio_info_cap(struct vfio_region_info *info, int cap)
{
	struct vfio_info_cap_header *h;
	size_t offset;

	if ((info->flags & RTE_VFIO_INFO_FLAG_CAPS) == 0) {
		/* VFIO info does not advertise capabilities */
		return NULL;
	}

	offset = VFIO_CAP_OFFSET(info);
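
	/*
	 * Note (added for clarity): capability headers are chained through
	 * their 'next' field, which holds an offset from the start of the
	 * region info structure; a 'next' of 0 terminates the chain.
	 */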
	while (offset != 0) {
		h = RTE_PTR_ADD(info, offset);
		if (h->id == cap)
			return h;
		offset = h->next;
	}
	return NULL;
}

static int
pci_vfio_msix_is_mappable(int vfio_dev_fd, int msix_region)
{
	struct vfio_region_info *info;
	int ret;

	ret = pci_vfio_get_region_info(vfio_dev_fd, &info, msix_region);
	if (ret < 0)
		return -1;

	ret = pci_vfio_info_cap(info, RTE_VFIO_CAP_MSIX_MAPPABLE) != NULL;

	/* cleanup */
	free(info);

	return ret;
}

static int
pci_vfio_map_resource_primary(struct rte_pci_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	char pci_addr[PATH_MAX] = {0};
	int vfio_dev_fd;
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);

	struct pci_map *maps;

	dev->intr_handle.fd = -1;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	dev->vfio_req_intr_handle.fd = -1;
#endif

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
					&vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* allocate vfio_res and get region info */
	vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store vfio mmap details\n", __func__);
		goto err_vfio_dev_fd;
	}
	memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));

	/* get number of regions (up to BAR5) */
	vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
			VFIO_PCI_BAR5_REGION_INDEX + 1);

	/* map BARs */
	maps = vfio_res->maps;

	vfio_res->msix_table.bar_index = -1;
	/* get the MSI-X BAR, if any (we have to know where it is because we
	 * can't easily mmap it when using VFIO)
	 */
	ret = pci_vfio_get_msix_bar(vfio_dev_fd, &vfio_res->msix_table);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "  %s cannot get MSI-X BAR number!\n",
				pci_addr);
		goto err_vfio_res;
	}
	/* if we found our MSI-X BAR region, check if we can mmap it */
	if (vfio_res->msix_table.bar_index != -1) {
		int ret = pci_vfio_msix_is_mappable(vfio_dev_fd,
				vfio_res->msix_table.bar_index);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "Couldn't check if MSI-X BAR is mappable\n");
			goto err_vfio_res;
		} else if (ret != 0) {
			/* we can map it, so we don't care where it is */
			RTE_LOG(DEBUG, EAL, "VFIO reports MSI-X BAR as mappable\n");
			vfio_res->msix_table.bar_index = -1;
		}
	}
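
	/*
	 * Background note (added for clarity, based on VFIO behaviour):
	 * newer kernels may allow the MSI-X table BAR to be mmapped and
	 * advertise this via the VFIO_REGION_INFO_CAP_MSIX_MAPPABLE region
	 * capability; when it is reported, bar_index is reset to -1 above
	 * so that no hole needs to be punched around the table.
	 */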

	for (i = 0; i < (int) vfio_res->nb_maps; i++) {
		struct vfio_region_info *reg = NULL;
		void *bar_addr;

		ret = pci_vfio_get_region_info(vfio_dev_fd, &reg, i);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s cannot get device region info, "
				"error %i (%s)\n", pci_addr, errno,
				strerror(errno));
			goto err_vfio_res;
		}

		/* check for an I/O port region */
		ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
		if (ret < 0) {
			free(reg);
			goto err_vfio_res;
		} else if (ret) {
			RTE_LOG(INFO, EAL, "Ignoring mapping of IO port BAR (%d)\n",
					i);
			free(reg);
			continue;
		}

		/* skip non-mmappable BARs */
		if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
			free(reg);
			continue;
		}

		/* try mapping somewhere close to the end of hugepages */
		if (pci_map_addr == NULL)
			pci_map_addr = pci_find_max_end_va();

		bar_addr = pci_map_addr;
		pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg->size);

		pci_map_addr = RTE_PTR_ALIGN(pci_map_addr,
					sysconf(_SC_PAGE_SIZE));

		maps[i].addr = bar_addr;
		maps[i].offset = reg->offset;
		maps[i].size = reg->size;
		maps[i].path = NULL; /* vfio doesn't have per-resource paths */

		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s mapping BAR%i failed: %s\n",
					pci_addr, i, strerror(errno));
			free(reg);
			goto err_vfio_res;
		}

		dev->mem_resource[i].addr = maps[i].addr;

		free(reg);
	}

	if (pci_rte_vfio_setup_device(dev, vfio_dev_fd) < 0) {
		RTE_LOG(ERR, EAL, "  %s setup device failed\n", pci_addr);
		goto err_vfio_res;
	}

#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	if (pci_vfio_enable_notifier(dev, vfio_dev_fd) != 0) {
		RTE_LOG(ERR, EAL, "Error setting up notifier!\n");
		goto err_vfio_res;
	}
#endif

	TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);

	return 0;
err_vfio_res:
	rte_free(vfio_res);
err_vfio_dev_fd:
	close(vfio_dev_fd);
	return -1;
}

static int
pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	char pci_addr[PATH_MAX] = {0};
	int vfio_dev_fd;
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);

	struct pci_map *maps;

	dev->intr_handle.fd = -1;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	dev->vfio_req_intr_handle.fd = -1;
#endif

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	/* if we're in a secondary process, just find our tailq entry */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (rte_pci_addr_cmp(&vfio_res->pci_addr,
						 &dev->addr))
			continue;
		break;
	}
	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
					&vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* map BARs */
	maps = vfio_res->maps;

	for (i = 0; i < (int) vfio_res->nb_maps; i++) {
		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s mapping BAR%i failed: %s\n",
					pci_addr, i, strerror(errno));
			goto err_vfio_dev_fd;
		}

		dev->mem_resource[i].addr = maps[i].addr;
	}

	/* we need to save vfio_dev_fd so it can be used during release */
	dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;
#endif

	return 0;
err_vfio_dev_fd:
	close(vfio_dev_fd);
	return -1;
}

/*
 * Map the PCI resources of a PCI device into virtual memory (VFIO version).
 * Primary and secondary processes follow almost exactly the same path.
 */
int
pci_vfio_map_resource(struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return pci_vfio_map_resource_primary(dev);
	else
		return pci_vfio_map_resource_secondary(dev);
}

static struct mapped_pci_resource *
find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
			struct rte_pci_device *dev,
			const char *pci_addr)
{
	struct mapped_pci_resource *vfio_res = NULL;
	struct pci_map *maps;
	int i;

	/* Get vfio_res */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr))
			continue;
		break;
	}

	if (vfio_res == NULL)
		return vfio_res;

	RTE_LOG(INFO, EAL, "Releasing PCI mapped resource for %s\n",
		pci_addr);

	maps = vfio_res->maps;
	for (i = 0; i < (int) vfio_res->nb_maps; i++) {

		/*
		 * We do not need the MSI-X table awareness we had when
		 * mapping; using the current maps array is enough.
		 */
		if (maps[i].addr) {
			RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
				pci_addr, maps[i].addr);
			pci_unmap_resource(maps[i].addr, maps[i].size);
		}
	}

	return vfio_res;
}

static int
pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
{
	char pci_addr[PATH_MAX] = {0};
	struct rte_pci_addr *loc = &dev->addr;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list;
	int ret;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	ret = pci_vfio_disable_notifier(dev);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to disable req notifier.\n");
		return -1;
	}
#endif

	if (close(dev->intr_handle.fd) < 0) {
		RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
			pci_addr);
		return -1;
	}

	if (pci_vfio_set_bus_master(dev->intr_handle.vfio_dev_fd, false)) {
		RTE_LOG(ERR, EAL, "  %s cannot unset bus mastering for PCI device!\n",
				pci_addr);
		return -1;
	}

	ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
				  dev->intr_handle.vfio_dev_fd);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot release device\n", __func__);
		return ret;
	}

	vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);

	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	TAILQ_REMOVE(vfio_res_list, vfio_res, next);

	return 0;
}

static int
pci_vfio_unmap_resource_secondary(struct rte_pci_device *dev)
{
	char pci_addr[PATH_MAX] = {0};
	struct rte_pci_addr *loc = &dev->addr;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list;
	int ret;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
				  dev->intr_handle.vfio_dev_fd);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot release device\n", __func__);
		return ret;
	}

	vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);

	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	return 0;
}

int
pci_vfio_unmap_resource(struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return pci_vfio_unmap_resource_primary(dev);
	else
		return pci_vfio_unmap_resource_secondary(dev);
}

int
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
		    struct rte_pci_ioport *p)
{
	if (bar < VFIO_PCI_BAR0_REGION_INDEX ||
	    bar > VFIO_PCI_BAR5_REGION_INDEX) {
		RTE_LOG(ERR, EAL, "invalid bar (%d)!\n", bar);
		return -1;
	}

	p->dev = dev;
	p->base = VFIO_GET_REGION_ADDR(bar);
	return 0;
}
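
/*
 * Note (added for clarity): I/O port BARs are not mmapped; p->base holds
 * the VFIO region offset for the BAR, and the accessors below issue
 * pread64()/pwrite64() on the device fd at that offset, so the VFIO
 * kernel driver performs the actual port I/O.
 */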

void
pci_vfio_ioport_read(struct rte_pci_ioport *p,
		     void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pread64(intr_handle->vfio_dev_fd, data,
		    len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}

void
pci_vfio_ioport_write(struct rte_pci_ioport *p,
		      const void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pwrite64(intr_handle->vfio_dev_fd, data,
		     len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}

int
pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
{
	RTE_SET_USED(p);
	return -1;
}

int
pci_vfio_is_enabled(void)
{
	return rte_vfio_is_enabled("vfio_pci");
}
#endif