xref: /dpdk/drivers/bus/pci/pci_common.c (revision f5057be340e44f3edc0fe90fa875eb89a4c49b4f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <string.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_paging.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_vfio.h>

#include "private.h"


#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"

const char *rte_pci_get_sysfs_path(void)
{
	const char *path = NULL;

#ifdef RTE_EXEC_ENV_LINUX
	path = getenv("SYSFS_PCI_DEVICES");
	if (path == NULL)
		return SYSFS_PCI_DEVICES;
#endif

	return path;
}
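
/*
 * Illustrative note (not part of the original file): on Linux the sysfs
 * location used for PCI scanning can be overridden through the
 * SYSFS_PCI_DEVICES environment variable, which is mainly useful for
 * testing against a fake sysfs tree. A minimal sketch, assuming the
 * variable is set before rte_eal_init():
 *
 *	setenv("SYSFS_PCI_DEVICES", "/tmp/fake-sysfs/devices", 1);
 *	printf("scanning %s\n", rte_pci_get_sysfs_path());
 */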

static struct rte_devargs *
pci_devargs_lookup(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs;
	struct rte_pci_addr addr;

	RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
		devargs->bus->parse(devargs->name, &addr);
		if (!rte_pci_addr_cmp(pci_addr, &addr))
			return devargs;
	}
	return NULL;
}

void
pci_name_set(struct rte_pci_device *dev)
{
	struct rte_devargs *devargs;

	/* Each device has its internal, canonical name set. */
	rte_pci_device_name(&dev->addr,
			dev->name, sizeof(dev->name));
	devargs = pci_devargs_lookup(&dev->addr);
	dev->device.devargs = devargs;
	/* In blacklist mode, if the device is not blacklisted, no
	 * rte_devargs exists for it.
	 */
	if (devargs != NULL)
		/* If an rte_devargs exists, the generic rte_device uses the
		 * given name as its name.
		 */
		dev->device.name = dev->device.devargs->name;
	else
		/* Otherwise, it uses the internal, canonical form. */
		dev->device.name = dev->name;
}
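
/*
 * Illustrative note (not part of the original file): the canonical name
 * produced by rte_pci_device_name() follows the PCI_PRI_FMT layout,
 * i.e. domain:bus:devid.function. A minimal sketch:
 *
 *	struct rte_pci_addr a = { .domain = 0, .bus = 0x08,
 *				  .devid = 0x00, .function = 0 };
 *	char name[PCI_PRI_STR_SIZE];
 *
 *	rte_pci_device_name(&a, name, sizeof(name));
 *
 * name now holds "0000:08:00.0".
 */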

/* map a particular resource from a file */
void *
pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
		 int additional_flags)
{
	void *mapaddr;

	/* Map the PCI memory resource of the device. */
	mapaddr = rte_mem_map(requested_addr, size,
		RTE_PROT_READ | RTE_PROT_WRITE,
		RTE_MAP_SHARED | additional_flags, fd, offset);
	if (mapaddr == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot map resource(%d, %p, 0x%zx, 0x%llx): %s (%p)\n",
			__func__, fd, requested_addr, size,
			(unsigned long long)offset,
			rte_strerror(rte_errno), mapaddr);
	} else
		RTE_LOG(DEBUG, EAL, "  PCI memory mapped at %p\n", mapaddr);

	return mapaddr;
}
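
/*
 * Illustrative sketch (not part of the original file): mapping a BAR from
 * an already opened UIO/sysfs resource file descriptor. The fd and
 * bar_size below are placeholders.
 *
 *	void *bar = pci_map_resource(NULL, fd, 0, bar_size, 0);
 *
 *	if (bar == NULL)
 *		return -1;
 *	...
 *	pci_unmap_resource(bar, bar_size);
 */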

/* unmap a particular resource */
void
pci_unmap_resource(void *requested_addr, size_t size)
{
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of the device. */
	if (rte_mem_unmap(requested_addr, size)) {
		RTE_LOG(ERR, EAL, "%s(): cannot mem unmap(%p, %#zx): %s\n",
			__func__, requested_addr, size,
			rte_strerror(rte_errno));
	} else
		RTE_LOG(DEBUG, EAL, "  PCI memory unmapped at %p\n",
				requested_addr);
}

/*
 * Match the PCI driver and device using the ID table.
 */
int
rte_pci_match(const struct rte_pci_driver *pci_drv,
	      const struct rte_pci_device *pci_dev)
{
	const struct rte_pci_id *id_table;

	for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
	     id_table++) {
		/* check if device's identifiers match the driver's ones */
		if (id_table->vendor_id != pci_dev->id.vendor_id &&
				id_table->vendor_id != PCI_ANY_ID)
			continue;
		if (id_table->device_id != pci_dev->id.device_id &&
				id_table->device_id != PCI_ANY_ID)
			continue;
		if (id_table->subsystem_vendor_id !=
		    pci_dev->id.subsystem_vendor_id &&
		    id_table->subsystem_vendor_id != PCI_ANY_ID)
			continue;
		if (id_table->subsystem_device_id !=
		    pci_dev->id.subsystem_device_id &&
		    id_table->subsystem_device_id != PCI_ANY_ID)
			continue;
		if (id_table->class_id != pci_dev->id.class_id &&
				id_table->class_id != RTE_CLASS_ANY_ID)
			continue;

		return 1;
	}

	return 0;
}
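
/*
 * Illustrative sketch (not part of the original file): the ID table walked
 * by rte_pci_match() is a zero-terminated array, typically built with the
 * RTE_PCI_DEVICE() helper. The vendor/device IDs below are placeholders.
 *
 *	static const struct rte_pci_id my_pci_ids[] = {
 *		{ RTE_PCI_DEVICE(0x8086, 0x10fb) },
 *		{ RTE_PCI_DEVICE(0x8086, 0x1528) },
 *		{ .vendor_id = 0 },
 *	};
 *
 * The final entry with vendor_id == 0 is the sentinel that stops the loop.
 * A wildcard entry can use PCI_ANY_ID (and RTE_CLASS_ANY_ID for class_id)
 * to match any value of that field.
 */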

/*
 * If the vendor and device IDs match, call the probe() function of the
 * driver.
 */
static int
rte_pci_probe_one_driver(struct rte_pci_driver *dr,
			 struct rte_pci_device *dev)
{
	int ret;
	bool already_probed;
	struct rte_pci_addr *loc;

	if ((dr == NULL) || (dev == NULL))
		return -EINVAL;

	loc = &dev->addr;

	/* The device is not blacklisted; check if the driver supports it. */
	if (!rte_pci_match(dr, dev))
		/* Match of device and driver failed */
		return 1;

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);

	/* no initialization when blacklisted, return without error */
	if (dev->device.devargs != NULL &&
		dev->device.devargs->policy ==
			RTE_DEV_BLACKLISTED) {
		RTE_LOG(INFO, EAL, "  Device is blacklisted, not"
			" initializing\n");
		return 1;
	}

	if (dev->device.numa_node < 0) {
		RTE_LOG(WARNING, EAL, "  Invalid NUMA socket, default to 0\n");
		dev->device.numa_node = 0;
	}

	already_probed = rte_dev_is_probed(&dev->device);
	if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
		RTE_LOG(DEBUG, EAL, "Device %s is already probed\n",
				dev->device.name);
		return -EEXIST;
	}

	RTE_LOG(DEBUG, EAL, "  probe driver: %x:%x %s\n", dev->id.vendor_id,
		dev->id.device_id, dr->driver.name);

	/*
	 * Reference the driver structure.
	 * This needs to happen before rte_pci_map_device(), as it allows
	 * the driver flags to be used when adjusting the configuration.
	 */
	if (!already_probed) {
		enum rte_iova_mode dev_iova_mode;
		enum rte_iova_mode iova_mode;

		dev_iova_mode = pci_device_iova_mode(dr, dev);
		iova_mode = rte_eal_iova_mode();
		if (dev_iova_mode != RTE_IOVA_DC &&
		    dev_iova_mode != iova_mode) {
			RTE_LOG(ERR, EAL, "  Expecting '%s' IOVA mode but current mode is '%s', not initializing\n",
				dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA",
				iova_mode == RTE_IOVA_PA ? "PA" : "VA");
			return -EINVAL;
		}

		dev->driver = dr;
	}

	if (!already_probed && (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)) {
		/* map resources for devices that use igb_uio */
		ret = rte_pci_map_device(dev);
		if (ret != 0) {
			dev->driver = NULL;
			return ret;
		}
	}

	RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n",
			dr->driver.name, dev->id.vendor_id, dev->id.device_id,
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);
	/* call the driver probe() function */
	ret = dr->probe(dr, dev);
	if (already_probed)
		return ret; /* no rollback if already succeeded earlier */
	if (ret) {
		dev->driver = NULL;
		if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
			/* Don't unmap if the device is unsupported and
			 * the driver wants to keep the resources mapped.
			 */
			!(ret > 0 &&
				(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
			rte_pci_unmap_device(dev);
	} else {
		dev->device.driver = &dr->driver;
	}

	return ret;
}
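
/*
 * Descriptive note (added for clarity, not in the original file): the
 * return convention of rte_pci_probe_one_driver() is
 *	< 0 - the driver matched but probing failed (or -EEXIST if the
 *	      device is already probed and the driver does not support
 *	      RTE_PCI_DRV_PROBE_AGAIN),
 *	  0 - the driver matched and the device was probed successfully,
 *	  1 - the driver does not support this device, or the device is
 *	      blacklisted; the caller should try the next driver.
 */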

/*
 * If the vendor and device IDs match, call the remove() function of the
 * driver.
 */
static int
rte_pci_detach_dev(struct rte_pci_device *dev)
{
	struct rte_pci_addr *loc;
	struct rte_pci_driver *dr;
	int ret = 0;

	if (dev == NULL)
		return -EINVAL;

	dr = dev->driver;
	loc = &dev->addr;

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid,
			loc->function, dev->device.numa_node);

	RTE_LOG(DEBUG, EAL, "  remove driver: %x:%x %s\n", dev->id.vendor_id,
			dev->id.device_id, dr->driver.name);

	if (dr->remove) {
		ret = dr->remove(dev);
		if (ret < 0)
			return ret;
	}

	/* clear driver structure */
	dev->driver = NULL;
	dev->device.driver = NULL;

	if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
		/* unmap resources for devices that use igb_uio */
		rte_pci_unmap_device(dev);

	return 0;
}

/*
 * If the vendor and device IDs match, call the probe() function of all
 * registered drivers for the given device. Return < 0 if initialization
 * failed, return 1 if no driver is found for this device.
 */
static int
pci_probe_all_drivers(struct rte_pci_device *dev)
{
	struct rte_pci_driver *dr = NULL;
	int rc = 0;

	if (dev == NULL)
		return -EINVAL;

	FOREACH_DRIVER_ON_PCIBUS(dr) {
		rc = rte_pci_probe_one_driver(dr, dev);
		if (rc < 0)
			/* negative value is an error */
			return rc;
		if (rc > 0)
			/* positive value means driver doesn't support it */
			continue;
		return 0;
	}
	return 1;
}
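
/*
 * Descriptive note (added for clarity, not in the original file): the loop
 * above stops at the first driver that accepts the device (return 0) or
 * reports a hard error (return < 0); drivers that simply do not support
 * the device (return > 0) are skipped, and 1 is returned if every
 * registered driver was skipped.
 */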

/*
 * Scan the content of the PCI bus and call the probe() function for
 * all registered drivers that have a matching entry in their id_table
 * for the discovered devices.
 */
static int
pci_probe(void)
{
	struct rte_pci_device *dev = NULL;
	size_t probed = 0, failed = 0;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		probed++;

		ret = pci_probe_all_drivers(dev);
		if (ret < 0) {
			if (ret != -EEXIST) {
				RTE_LOG(ERR, EAL, "Requested device "
					PCI_PRI_FMT " cannot be used\n",
					dev->addr.domain, dev->addr.bus,
					dev->addr.devid, dev->addr.function);
				rte_errno = errno;
				failed++;
			}
			ret = 0;
		}
	}

	return (probed && probed == failed) ? -1 : 0;
}

/* dump one device */
static int
pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
{
	int i;

	fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
	       dev->addr.devid, dev->addr.function);
	fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
	       dev->id.device_id);

	for (i = 0; i != sizeof(dev->mem_resource) /
		sizeof(dev->mem_resource[0]); i++) {
		fprintf(f, "   %16.16"PRIx64" %16.16"PRIx64"\n",
			dev->mem_resource[i].phys_addr,
			dev->mem_resource[i].len);
	}
	return 0;
}

/* dump devices on the bus */
void
rte_pci_dump(FILE *f)
{
	struct rte_pci_device *dev = NULL;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		pci_dump_one_device(f, dev);
	}
}

static int
pci_parse(const char *name, void *addr)
{
	struct rte_pci_addr *out = addr;
	struct rte_pci_addr pci_addr;
	bool parse;

	parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
	if (parse && addr != NULL)
		*out = pci_addr;
	return parse == false;
}
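
/*
 * Illustrative sketch (not part of the original file): pci_parse() relies
 * on rte_pci_addr_parse(), which returns 0 when the name is a valid PCI
 * address.
 *
 *	struct rte_pci_addr a;
 *
 *	if (rte_pci_addr_parse("0000:08:00.0", &a) == 0)
 *		... use a.domain, a.bus, a.devid, a.function ...
 *
 * The short BDF form without the domain ("08:00.0") is also accepted.
 */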

/* register a driver */
void
rte_pci_register(struct rte_pci_driver *driver)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
	driver->bus = &rte_pci_bus;
}

/* unregister a driver */
void
rte_pci_unregister(struct rte_pci_driver *driver)
{
	TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);
	driver->bus = NULL;
}
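
/*
 * Illustrative sketch (not part of the original file): PMDs normally do
 * not call rte_pci_register() directly; they use the RTE_PMD_REGISTER_PCI
 * constructor macro. The driver name, ID table and callbacks below are
 * placeholders.
 *
 *	static struct rte_pci_driver my_pmd = {
 *		.id_table = my_pci_ids,
 *		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *		.probe = my_pci_probe,
 *		.remove = my_pci_remove,
 *	};
 *
 *	RTE_PMD_REGISTER_PCI(net_my_pmd, my_pmd);
 *	RTE_PMD_REGISTER_PCI_TABLE(net_my_pmd, my_pci_ids);
 */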

/* Add a device to PCI bus */
void
rte_pci_add_device(struct rte_pci_device *pci_dev)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);
}

/* Insert a device into a predefined position in PCI bus */
void
rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
		      struct rte_pci_device *new_pci_dev)
{
	TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);
}

/* Remove a device from PCI bus */
static void
rte_pci_remove_device(struct rte_pci_device *pci_dev)
{
	TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
}

static struct rte_device *
pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		const void *data)
{
	const struct rte_pci_device *pstart;
	struct rte_pci_device *pdev;

	if (start != NULL) {
		pstart = RTE_DEV_TO_PCI_CONST(start);
		pdev = TAILQ_NEXT(pstart, next);
	} else {
		pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
	}
	while (pdev != NULL) {
		if (cmp(&pdev->device, data) == 0)
			return &pdev->device;
		pdev = TAILQ_NEXT(pdev, next);
	}
	return NULL;
}

/*
 * Find the device that encountered the failure by iterating over all
 * devices on the PCI bus and checking whether the memory failure address
 * falls within the range of one of the device's BARs.
 */
static struct rte_pci_device *
pci_find_device_by_addr(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	uint64_t check_point, start, end, len;
	int i;

	check_point = (uint64_t)(uintptr_t)failure_addr;

	FOREACH_DEVICE_ON_PCIBUS(pdev) {
		for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
			start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
			len = pdev->mem_resource[i].len;
			end = start + len;
			if (check_point >= start && check_point < end) {
				RTE_LOG(DEBUG, EAL, "Failure address %16.16"
					PRIx64" belongs to device %s!\n",
					check_point, pdev->device.name);
				return pdev;
			}
		}
	}
	return NULL;
}
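
/*
 * Descriptive note (added for clarity, not in the original file): this
 * lookup is only exercised from the SIGBUS path below. A faulting access
 * to an unmapped BAR of a hot-unplugged device raises SIGBUS; the handler
 * resolves the faulting address back to the owning rte_pci_device so that
 * the unplug can be handled per device rather than crashing the process.
 */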

static int
pci_hot_unplug_handler(struct rte_device *dev)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = RTE_DEV_TO_PCI(dev);
	if (!pdev)
		return -1;

	switch (pdev->kdrv) {
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_PCI_KDRV_VFIO:
		/*
		 * The VFIO kernel module guarantees that the PCI device
		 * will not be deleted until user space releases its
		 * resources, so there is no need to remap the BAR
		 * resources here; just forward the request event to user
		 * space and let it handle the removal.
		 */
		rte_dev_event_callback_process(dev->name,
					       RTE_DEV_EVENT_REMOVE);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
	case RTE_PCI_KDRV_NIC_UIO:
		/* The BAR resources are invalid; remap them to be safe. */
		ret = pci_uio_remap_resource(pdev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"Not managed by a supported kernel driver, skipped\n");
		ret = -1;
		break;
	}

	return ret;
}

static int
pci_sigbus_handler(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = pci_find_device_by_addr(failure_addr);
	if (!pdev) {
		/* It is a generic SIGBUS error; no bus would handle it. */
		ret = 1;
	} else {
		/* The SIGBUS error is caused by a hot-unplug. */
		ret = pci_hot_unplug_handler(&pdev->device);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to handle hot-unplug for device %s\n",
				pdev->name);
			ret = -1;
		}
	}
	return ret;
}

static int
pci_plug(struct rte_device *dev)
{
	return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
}

static int
pci_unplug(struct rte_device *dev)
{
	struct rte_pci_device *pdev;
	int ret;

	pdev = RTE_DEV_TO_PCI(dev);
	ret = rte_pci_detach_dev(pdev);
	if (ret == 0) {
		rte_pci_remove_device(pdev);
		rte_devargs_remove(dev->devargs);
		free(pdev);
	}
	return ret;
}

static int
pci_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_map)
		return pdev->driver->dma_map(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific mapping,
	 * fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_map
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}
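
/*
 * Illustrative sketch (not part of the original file): this callback is
 * reached through the generic rte_dev_dma_map() helper, e.g. to make an
 * externally allocated buffer DMA-able for a given device. The variables
 * below are placeholders.
 *
 *	if (rte_dev_dma_map(&pdev->device, buf_va, buf_iova, buf_len) != 0)
 *		handle_error(rte_errno);
 *	...
 *	rte_dev_dma_unmap(&pdev->device, buf_va, buf_iova, buf_len);
 */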

static int
pci_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_unmap)
		return pdev->driver->dma_unmap(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific unmapping,
	 * fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_unmap
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}

bool
rte_pci_ignore_device(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs = pci_devargs_lookup(pci_addr);

	switch (rte_pci_bus.bus.conf.scan_mode) {
	case RTE_BUS_SCAN_WHITELIST:
		if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
			return false;
		break;
	case RTE_BUS_SCAN_UNDEFINED:
	case RTE_BUS_SCAN_BLACKLIST:
		if (devargs == NULL ||
		    devargs->policy != RTE_DEV_BLACKLISTED)
			return false;
		break;
	}
	return true;
}
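
/*
 * Descriptive note (added for clarity, not in the original file): the scan
 * mode above is driven by the EAL device options. In this revision,
 * whitelisting a device (e.g. "-w 0000:08:00.0" on the EAL command line)
 * switches the bus to RTE_BUS_SCAN_WHITELIST so that only listed devices
 * are scanned, while blacklisting ("-b <addr>") keeps blacklist mode and
 * only skips the listed devices. Exact option spellings depend on the EAL
 * version.
 */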

enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	enum rte_iova_mode iova_mode = RTE_IOVA_DC;
	const struct rte_pci_device *dev;
	const struct rte_pci_driver *drv;
	bool devices_want_va = false;
	bool devices_want_pa = false;
	int iommu_no_va = -1;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		/*
		 * We can check this only once, because the IOMMU hardware is
		 * the same for all of them.
		 */
		if (iommu_no_va == -1)
			iommu_no_va = pci_device_iommu_support_va(dev)
					? 0 : 1;

		if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_PCI_KDRV_NONE)
			continue;
		FOREACH_DRIVER_ON_PCIBUS(drv) {
			enum rte_iova_mode dev_iova_mode;

			if (!rte_pci_match(drv, dev))
				continue;

			dev_iova_mode = pci_device_iova_mode(drv, dev);
			RTE_LOG(DEBUG, EAL, "PCI driver %s for device "
				PCI_PRI_FMT " wants IOVA as '%s'\n",
				drv->driver.name,
				dev->addr.domain, dev->addr.bus,
				dev->addr.devid, dev->addr.function,
				dev_iova_mode == RTE_IOVA_DC ? "DC" :
				(dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA"));
			if (dev_iova_mode == RTE_IOVA_PA)
				devices_want_pa = true;
			else if (dev_iova_mode == RTE_IOVA_VA)
				devices_want_va = true;
		}
	}
	if (iommu_no_va == 1) {
		iova_mode = RTE_IOVA_PA;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but IOMMU does not support 'VA'.\n");
			RTE_LOG(WARNING, EAL, "The devices that want 'VA' won't initialize.\n");
		}
	} else if (devices_want_va && !devices_want_pa) {
		iova_mode = RTE_IOVA_VA;
	} else if (devices_want_pa && !devices_want_va) {
		iova_mode = RTE_IOVA_PA;
	} else {
		iova_mode = RTE_IOVA_DC;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but forcing 'DC' because other devices want 'PA'.\n");
			RTE_LOG(WARNING, EAL, "Depending on the final decision by the EAL, not all devices may be able to initialize.\n");
		}
	}
	return iova_mode;
}
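
/*
 * Descriptive note (added for clarity, not in the original file): the
 * decision above can be summarized as
 *
 *	IOMMU supports VA | devices want     | reported class
 *	------------------+------------------+---------------------
 *	no                | anything         | PA
 *	yes               | only VA          | VA
 *	yes               | only PA          | PA
 *	yes               | both / none / DC | DC (EAL decides later)
 */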

off_t
rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap)
{
	off_t offset = RTE_PCI_CFG_SPACE_SIZE;
	uint32_t header;
	int ttl;

	/* minimum 8 bytes per capability */
	ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
		RTE_LOG(ERR, EAL, "error in reading extended capabilities\n");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl != 0) {
		if (RTE_PCI_EXT_CAP_ID(header) == cap)
			return offset;

		offset = RTE_PCI_EXT_CAP_NEXT(header);

		if (offset < RTE_PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
			RTE_LOG(ERR, EAL,
				"error in reading extended capabilities\n");
			return -1;
		}

		ttl--;
	}

	return 0;
}
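
/*
 * Illustrative sketch (not part of the original file): looking up an
 * extended capability and reading a register from it. 0x10 is the SR-IOV
 * extended capability ID and 0xe the offset of its TotalVFs field, both
 * per the PCIe specification.
 *
 *	off_t pos = rte_pci_find_ext_capability(dev, 0x10);
 *	uint16_t total_vfs;
 *
 *	if (pos > 0) {
 *		if (rte_pci_read_config(dev, &total_vfs, sizeof(total_vfs),
 *					pos + 0xe) < 0)
 *			return -1;
 *		... total_vfs now holds the TotalVFs field ...
 *	}
 *
 * A return value of 0 means the capability was not found, and a negative
 * value means the config space could not be read.
 */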

struct rte_pci_bus rte_pci_bus = {
	.bus = {
		.scan = rte_pci_scan,
		.probe = pci_probe,
		.find_device = pci_find_device,
		.plug = pci_plug,
		.unplug = pci_unplug,
		.parse = pci_parse,
		.dma_map = pci_dma_map,
		.dma_unmap = pci_dma_unmap,
		.get_iommu_class = rte_pci_get_iommu_class,
		.dev_iterate = rte_pci_dev_iterate,
		.hot_unplug_handler = pci_hot_unplug_handler,
		.sigbus_handler = pci_sigbus_handler,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
};

RTE_REGISTER_BUS(pci, rte_pci_bus.bus);