/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <string.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <bus_driver.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_paging.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_vfio.h>
#include <rte_tailq.h>

#include "private.h"


#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"

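/*
 * Return the sysfs path used to scan PCI devices. On Linux this can be
 * overridden with the SYSFS_PCI_DEVICES environment variable; on other
 * execution environments there is no sysfs and NULL is returned.
 */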
const char *rte_pci_get_sysfs_path(void)
{
	const char *path = NULL;

#ifdef RTE_EXEC_ENV_LINUX
	path = getenv("SYSFS_PCI_DEVICES");
	if (path == NULL)
		return SYSFS_PCI_DEVICES;
#endif

	return path;
}

#ifdef RTE_EXEC_ENV_WINDOWS
#define asprintf pci_asprintf

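/*
 * Windows has no asprintf(); emulate it with two vsnprintf() passes:
 * the first computes the required buffer size, the second formats into
 * the freshly allocated buffer.
 */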
static int
__rte_format_printf(2, 3)
pci_asprintf(char **buffer, const char *format, ...)
{
	int size, ret;
	va_list arg;

	va_start(arg, format);
	size = vsnprintf(NULL, 0, format, arg);
	va_end(arg);
	if (size < 0)
		return -1;
	size++;

	*buffer = malloc(size);
	if (*buffer == NULL)
		return -1;

	va_start(arg, format);
	ret = vsnprintf(*buffer, size, format, arg);
	va_end(arg);
	if (ret != size - 1) {
		free(*buffer);
		return -1;
	}
	return ret;
}
#endif /* RTE_EXEC_ENV_WINDOWS */

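/*
 * Find the rte_devargs entry matching a PCI address among the devargs
 * declared for the "pci" bus, or NULL if the device was not listed on
 * the command line.
 */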
static struct rte_devargs *
pci_devargs_lookup(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs;
	struct rte_pci_addr addr;

	RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
		devargs->bus->parse(devargs->name, &addr);
		if (!rte_pci_addr_cmp(pci_addr, &addr))
			return devargs;
	}
	return NULL;
}

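/*
 * Fill the generic rte_device fields (name, devargs, bus_info) of a
 * freshly scanned PCI device.
 */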
void
pci_common_set(struct rte_pci_device *dev)
{
	struct rte_devargs *devargs;

	/* Each device has its internal, canonical name set. */
	rte_pci_device_name(&dev->addr,
			dev->name, sizeof(dev->name));
	devargs = pci_devargs_lookup(&dev->addr);
	dev->device.devargs = devargs;

	/* When using a blocklist, only blocked devices will have
	 * an rte_devargs. Allowed devices won't have one.
	 */
	if (devargs != NULL)
		/* If an rte_devargs exists, the generic rte_device uses the
		 * given name as its name.
		 */
		dev->device.name = dev->device.devargs->name;
	else
		/* Otherwise, it uses the internal, canonical form. */
		dev->device.name = dev->name;

	if (dev->bus_info != NULL ||
			asprintf(&dev->bus_info, "vendor_id=%"PRIx16", device_id=%"PRIx16,
				dev->id.vendor_id, dev->id.device_id) != -1)
		dev->device.bus_info = dev->bus_info;
}

void
pci_free(struct rte_pci_device_internal *pdev)
{
	if (pdev == NULL)
		return;
	free(pdev->device.bus_info);
	free(pdev);
}

/* map a particular resource from a file */
void *
pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
		int additional_flags)
{
	void *mapaddr;

	/* Map the PCI memory resource of device */
	mapaddr = rte_mem_map(requested_addr, size,
		RTE_PROT_READ | RTE_PROT_WRITE,
		RTE_MAP_SHARED | additional_flags, fd, offset);
	if (mapaddr == NULL) {
		PCI_LOG(ERR, "%s(): cannot map resource(%d, %p, 0x%zx, 0x%llx): %s (%p)",
			__func__, fd, requested_addr, size,
			(unsigned long long)offset,
			rte_strerror(rte_errno), mapaddr);
	} else
		PCI_LOG(DEBUG, " PCI memory mapped at %p", mapaddr);

	return mapaddr;
}
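
/*
 * Illustrative sketch only (hypothetical fd and BAR size): a UIO-style
 * mapping of a 4 KiB BAR through this helper would look roughly like
 * the lines below, with failure reported as NULL rather than MAP_FAILED.
 *
 *	void *bar = pci_map_resource(NULL, uio_fd, 0, 4096, 0);
 *	if (bar == NULL)
 *		return -1;
 *	...
 *	pci_unmap_resource(bar, 4096);
 */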

/* unmap a particular resource */
void
pci_unmap_resource(void *requested_addr, size_t size)
{
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of device */
	if (rte_mem_unmap(requested_addr, size)) {
		PCI_LOG(ERR, "%s(): cannot mem unmap(%p, %#zx): %s",
			__func__, requested_addr, size,
			rte_strerror(rte_errno));
	} else
		PCI_LOG(DEBUG, " PCI memory unmapped at %p", requested_addr);
}
/*
 * Match the PCI Driver and Device using the ID Table
 */
int
rte_pci_match(const struct rte_pci_driver *pci_drv,
		const struct rte_pci_device *pci_dev)
{
	const struct rte_pci_id *id_table;

	for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
	     id_table++) {
		/* check if device's identifiers match the driver's ones */
		if (id_table->vendor_id != pci_dev->id.vendor_id &&
				id_table->vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->device_id != pci_dev->id.device_id &&
				id_table->device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_vendor_id !=
				pci_dev->id.subsystem_vendor_id &&
				id_table->subsystem_vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_device_id !=
				pci_dev->id.subsystem_device_id &&
				id_table->subsystem_device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->class_id != pci_dev->id.class_id &&
				id_table->class_id != RTE_CLASS_ANY_ID)
			continue;

		return 1;
	}

	return 0;
}
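
/*
 * Illustrative note: the match loop above stops at the first entry whose
 * vendor_id is 0, so a driver id_table must always end with a zeroed
 * sentinel entry, e.g. (hypothetical IDs):
 *
 *	static const struct rte_pci_id my_pci_id_map[] = {
 *		{ RTE_PCI_DEVICE(0x8086, 0x10fb) },
 *		{ .vendor_id = 0 },
 *	};
 */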

/*
 * If vendor/device ID match, call the probe() function of the
 * driver.
 */
static int
rte_pci_probe_one_driver(struct rte_pci_driver *dr,
		struct rte_pci_device *dev)
{
	int ret;
	bool already_probed;
	struct rte_pci_addr *loc;

	if ((dr == NULL) || (dev == NULL))
		return -EINVAL;

	loc = &dev->addr;

	/* The device is not blocked; check if the driver supports it */
	if (!rte_pci_match(dr, dev))
		/* Match of device and driver failed */
		return 1;

	PCI_LOG(DEBUG, "PCI device "PCI_PRI_FMT" on NUMA socket %i",
		loc->domain, loc->bus, loc->devid, loc->function,
		dev->device.numa_node);

	/* no initialization when marked as blocked, return without error */
	if (dev->device.devargs != NULL &&
		dev->device.devargs->policy == RTE_DEV_BLOCKED) {
		PCI_LOG(INFO, " Device is blocked, not initializing");
		return 1;
	}

	if (dev->device.numa_node < 0 && rte_socket_count() > 1)
		PCI_LOG(INFO, "Device %s is not NUMA-aware", dev->name);

	already_probed = rte_dev_is_probed(&dev->device);
	if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
		PCI_LOG(DEBUG, "Device %s is already probed", dev->device.name);
		return -EEXIST;
	}

	PCI_LOG(DEBUG, " probe driver: %x:%x %s", dev->id.vendor_id,
		dev->id.device_id, dr->driver.name);

	if (!already_probed) {
		enum rte_iova_mode dev_iova_mode;
		enum rte_iova_mode iova_mode;

		dev_iova_mode = pci_device_iova_mode(dr, dev);
		iova_mode = rte_eal_iova_mode();
		if (dev_iova_mode != RTE_IOVA_DC &&
		    dev_iova_mode != iova_mode) {
			PCI_LOG(ERR, " Expecting '%s' IOVA mode but current mode is '%s', not initializing",
				dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA",
				iova_mode == RTE_IOVA_PA ? "PA" : "VA");
			return -EINVAL;
		}

		/* Allocate interrupt instance for pci device */
		dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->intr_handle == NULL) {
			PCI_LOG(ERR, "Failed to create interrupt instance for %s",
				dev->device.name);
			return -ENOMEM;
		}

		dev->vfio_req_intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->vfio_req_intr_handle == NULL) {
			rte_intr_instance_free(dev->intr_handle);
			dev->intr_handle = NULL;
			PCI_LOG(ERR, "Failed to create vfio req interrupt instance for %s",
				dev->device.name);
			return -ENOMEM;
		}

		/*
		 * Reference the driver structure.
		 * This needs to be done before rte_pci_map_device(), as it
		 * allows the driver flags to be used when adjusting the
		 * configuration.
		 */
		dev->driver = dr;
		if (dev->driver->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
			ret = rte_pci_map_device(dev);
			if (ret != 0) {
				dev->driver = NULL;
				rte_intr_instance_free(dev->vfio_req_intr_handle);
				dev->vfio_req_intr_handle = NULL;
				rte_intr_instance_free(dev->intr_handle);
				dev->intr_handle = NULL;
				return ret;
			}
		}
	}

	PCI_LOG(INFO, "Probe PCI driver: %s (%x:%04x) device: "PCI_PRI_FMT" (socket %i)",
		dr->driver.name, dev->id.vendor_id, dev->id.device_id,
		loc->domain, loc->bus, loc->devid, loc->function,
		dev->device.numa_node);
	/* call the driver probe() function */
	ret = dr->probe(dr, dev);
	if (already_probed)
		return ret; /* no rollback if already succeeded earlier */
	if (ret) {
		dev->driver = NULL;
		if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
			/* Don't unmap if the device is unsupported and
			 * the driver wants to keep the resources mapped.
			 */
			!(ret > 0 &&
				(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
			rte_pci_unmap_device(dev);
		rte_intr_instance_free(dev->vfio_req_intr_handle);
		dev->vfio_req_intr_handle = NULL;
		rte_intr_instance_free(dev->intr_handle);
		dev->intr_handle = NULL;
	} else {
		dev->device.driver = &dr->driver;
	}

	return ret;
}

/*
 * If vendor/device ID match, call the remove() function of the
 * driver.
 */
static int
rte_pci_detach_dev(struct rte_pci_device *dev)
{
	struct rte_pci_addr *loc;
	struct rte_pci_driver *dr;
	int ret = 0;

	if (dev == NULL)
		return -EINVAL;

	dr = dev->driver;
	loc = &dev->addr;

	PCI_LOG(DEBUG, "PCI device "PCI_PRI_FMT" on NUMA socket %i",
		loc->domain, loc->bus, loc->devid,
		loc->function, dev->device.numa_node);

	PCI_LOG(DEBUG, " remove driver: %x:%x %s", dev->id.vendor_id,
		dev->id.device_id, dr->driver.name);

	if (dr->remove) {
		ret = dr->remove(dev);
		if (ret < 0)
			return ret;
	}

	/* clear driver structure */
	dev->driver = NULL;
	dev->device.driver = NULL;

	if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
		/* unmap resources for devices that use igb_uio */
		rte_pci_unmap_device(dev);

	rte_intr_instance_free(dev->intr_handle);
	dev->intr_handle = NULL;
	rte_intr_instance_free(dev->vfio_req_intr_handle);
	dev->vfio_req_intr_handle = NULL;

	return 0;
}

/*
 * If vendor/device ID match, call the probe() function of all
 * registered drivers for the given device. Return < 0 if initialization
 * failed, return 1 if no driver is found for this device.
 */
static int
pci_probe_all_drivers(struct rte_pci_device *dev)
{
	struct rte_pci_driver *dr = NULL;
	int rc = 0;

	if (dev == NULL)
		return -EINVAL;

	FOREACH_DRIVER_ON_PCIBUS(dr) {
		rc = rte_pci_probe_one_driver(dr, dev);
		if (rc < 0)
			/* negative value is an error */
			return rc;
		if (rc > 0)
			/* positive value means driver doesn't support it */
			continue;
		return 0;
	}
	return 1;
}

/*
 * Scan the content of the PCI bus, and call the probe() function for
 * all registered drivers that have a matching entry in their id_table
 * for the discovered devices.
 */
static int
pci_probe(void)
{
	struct rte_pci_device *dev = NULL;
	size_t probed = 0, failed = 0;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		probed++;

		ret = pci_probe_all_drivers(dev);
		if (ret < 0) {
			if (ret != -EEXIST) {
				PCI_LOG(ERR, "Requested device " PCI_PRI_FMT " cannot be used",
					dev->addr.domain, dev->addr.bus,
					dev->addr.devid, dev->addr.function);
				rte_errno = errno;
				failed++;
			}
			ret = 0;
		}
	}

	return (probed && probed == failed) ? -1 : 0;
}

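/*
 * Bus cleanup callback: detach every device still present on the bus,
 * free its interrupt handles and release the device structure itself.
 */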
static int
pci_cleanup(void)
{
	struct rte_pci_device *dev, *tmp_dev;
	int error = 0;

	RTE_TAILQ_FOREACH_SAFE(dev, &rte_pci_bus.device_list, next, tmp_dev) {
		struct rte_pci_driver *drv = dev->driver;
		int ret = 0;

		if (drv == NULL || drv->remove == NULL)
			goto free;

		ret = drv->remove(dev);
		if (ret < 0) {
			rte_errno = errno;
			error = -1;
		}
		dev->driver = NULL;
		dev->device.driver = NULL;

free:
		/* free interrupt handles */
		rte_intr_instance_free(dev->intr_handle);
		dev->intr_handle = NULL;
		rte_intr_instance_free(dev->vfio_req_intr_handle);
		dev->vfio_req_intr_handle = NULL;

		pci_free(RTE_PCI_DEVICE_INTERNAL(dev));
	}

	return error;
}

/* dump one device */
static int
pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
{
	int i;

	fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
		dev->addr.devid, dev->addr.function);
	fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
		dev->id.device_id);

	for (i = 0; i != sizeof(dev->mem_resource) /
		sizeof(dev->mem_resource[0]); i++) {
		fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
			dev->mem_resource[i].phys_addr,
			dev->mem_resource[i].len);
	}
	return 0;
}

/* dump devices on the bus */
void
rte_pci_dump(FILE *f)
{
	struct rte_pci_device *dev = NULL;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		pci_dump_one_device(f, dev);
	}
}

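/*
 * Bus parse callback: return 0 when 'name' is a valid PCI address and,
 * if 'addr' is not NULL, store the parsed address there; return a
 * non-zero value otherwise.
 */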
static int
pci_parse(const char *name, void *addr)
{
	struct rte_pci_addr *out = addr;
	struct rte_pci_addr pci_addr;
	bool parse;

	parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
	if (parse && addr != NULL)
		*out = pci_addr;
	return parse == false;
}

/* register a driver */
void
rte_pci_register(struct rte_pci_driver *driver)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
}

/* unregister a driver */
void
rte_pci_unregister(struct rte_pci_driver *driver)
{
	TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);
}

/* Add a device to PCI bus */
void
rte_pci_add_device(struct rte_pci_device *pci_dev)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);
}

/* Insert a device into a predefined position in PCI bus */
void
rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
		struct rte_pci_device *new_pci_dev)
{
	TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);
}

/* Remove a device from PCI bus */
static void
rte_pci_remove_device(struct rte_pci_device *pci_dev)
{
	TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
}

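/*
 * Bus find_device callback: walk the device list, starting right after
 * 'start' (or from the head when 'start' is NULL), and return the first
 * device for which the comparison function returns 0.
 */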
static struct rte_device *
pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		const void *data)
{
	const struct rte_pci_device *pstart;
	struct rte_pci_device *pdev;

	if (start != NULL) {
		pstart = RTE_DEV_TO_PCI_CONST(start);
		pdev = TAILQ_NEXT(pstart, next);
	} else {
		pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
	}
	while (pdev != NULL) {
		if (cmp(&pdev->device, data) == 0)
			return &pdev->device;
		pdev = TAILQ_NEXT(pdev, next);
	}
	return NULL;
}

/*
 * Find the device that encountered the failure by iterating over all
 * devices on the PCI bus and checking whether the faulting memory
 * address falls within the range of one of the device's BARs.
 */
static struct rte_pci_device *
pci_find_device_by_addr(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	uint64_t check_point, start, end, len;
	int i;

	check_point = (uint64_t)(uintptr_t)failure_addr;

	FOREACH_DEVICE_ON_PCIBUS(pdev) {
		for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
			start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
			len = pdev->mem_resource[i].len;
			end = start + len;
			if (check_point >= start && check_point < end) {
				PCI_LOG(DEBUG, "Failure address %16.16"
					PRIx64" belongs to device %s!",
					check_point, pdev->device.name);
				return pdev;
			}
		}
	}
	return NULL;
}

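/*
 * Bus hot_unplug_handler callback: recover from the removal of a device
 * according to the kernel driver it is bound to, either by forwarding
 * the event to the application (VFIO) or by remapping its BARs (UIO).
 */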
static int
pci_hot_unplug_handler(struct rte_device *dev)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = RTE_DEV_TO_PCI(dev);
	if (!pdev)
		return -1;

	switch (pdev->kdrv) {
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_PCI_KDRV_VFIO:
		/*
		 * The VFIO kernel module guarantees that the PCI device
		 * is not deleted until user space releases its resources,
		 * so there is no need to remap the BAR resources here:
		 * simply forward the request event to user space and let
		 * it handle the removal.
		 */
		rte_dev_event_callback_process(dev->name,
					       RTE_DEV_EVENT_REMOVE);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
	case RTE_PCI_KDRV_NIC_UIO:
		/* The BAR resources are invalid, remap them to be safe. */
		ret = pci_uio_remap_resource(pdev);
		break;
	default:
		PCI_LOG(DEBUG, "Not managed by a supported kernel driver, skipped");
		ret = -1;
		break;
	}

	return ret;
}

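/*
 * Bus sigbus_handler callback: return 1 when the faulting address does
 * not belong to any PCI device (a generic SIGBUS that the bus will not
 * handle), 0 when the hot-unplug was handled, and -1 on failure.
 */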
static int
pci_sigbus_handler(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = pci_find_device_by_addr(failure_addr);
	if (!pdev) {
		/* It is a generic sigbus error, no bus would handle it. */
		ret = 1;
	} else {
		/* The sigbus error was caused by a hot-unplug. */
		ret = pci_hot_unplug_handler(&pdev->device);
		if (ret) {
			PCI_LOG(ERR, "Failed to handle hot-unplug for device %s",
				pdev->name);
			ret = -1;
		}
	}
	return ret;
}

static int
pci_plug(struct rte_device *dev)
{
	return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
}

static int
pci_unplug(struct rte_device *dev)
{
	struct rte_pci_device *pdev;
	int ret;

	pdev = RTE_DEV_TO_PCI(dev);
	ret = rte_pci_detach_dev(pdev);
	if (ret == 0) {
		rte_pci_remove_device(pdev);
		rte_devargs_remove(dev->devargs);
		pci_free(RTE_PCI_DEVICE_INTERNAL(pdev));
	}
	return ret;
}

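/*
 * Bus dma_map callback: prefer the driver-specific dma_map operation
 * when it exists, otherwise fall back to mapping through the default
 * VFIO container for devices bound to vfio-pci.
 */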
static int
pci_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_map)
		return pdev->driver->dma_map(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific mapping,
	 * try falling back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_map
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}

static int
pci_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_unmap)
		return pdev->driver->dma_unmap(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific unmapping,
	 * try falling back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_unmap
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}

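/*
 * Return true when the device at 'pci_addr' must be ignored during the
 * bus scan, based on the scan mode (allowlist/blocklist) and on the
 * policy attached to its devargs, if any.
 */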
bool
rte_pci_ignore_device(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs = pci_devargs_lookup(pci_addr);

	switch (rte_pci_bus.bus.conf.scan_mode) {
	case RTE_BUS_SCAN_ALLOWLIST:
		if (devargs && devargs->policy == RTE_DEV_ALLOWED)
			return false;
		break;
	case RTE_BUS_SCAN_UNDEFINED:
	case RTE_BUS_SCAN_BLOCKLIST:
		if (devargs == NULL || devargs->policy != RTE_DEV_BLOCKED)
			return false;
		break;
	}
	return true;
}

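/*
 * Bus get_iommu_class callback: pick the IOVA mode for the PCI bus by
 * combining the IOMMU's virtual addressing support with the IOVA mode
 * requested by each driver that matches a scanned device. Conflicting
 * requests leave the decision to the EAL (RTE_IOVA_DC).
 */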
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	enum rte_iova_mode iova_mode = RTE_IOVA_DC;
	const struct rte_pci_device *dev;
	const struct rte_pci_driver *drv;
	bool devices_want_va = false;
	bool devices_want_pa = false;
	int iommu_no_va = -1;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		/*
		 * We can check this only once, because the IOMMU hardware is
		 * the same for all of them.
		 */
		if (iommu_no_va == -1)
			iommu_no_va = pci_device_iommu_support_va(dev)
					? 0 : 1;

		if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_PCI_KDRV_NONE)
			continue;
		FOREACH_DRIVER_ON_PCIBUS(drv) {
			enum rte_iova_mode dev_iova_mode;

			if (!rte_pci_match(drv, dev))
				continue;

			dev_iova_mode = pci_device_iova_mode(drv, dev);
			PCI_LOG(DEBUG, "PCI driver %s for device "PCI_PRI_FMT" wants IOVA as '%s'",
				drv->driver.name,
				dev->addr.domain, dev->addr.bus,
				dev->addr.devid, dev->addr.function,
				dev_iova_mode == RTE_IOVA_DC ? "DC" :
				(dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA"));
			if (dev_iova_mode == RTE_IOVA_PA)
				devices_want_pa = true;
			else if (dev_iova_mode == RTE_IOVA_VA)
				devices_want_va = true;
		}
	}
	if (iommu_no_va == 1) {
		iova_mode = RTE_IOVA_PA;
		if (devices_want_va) {
			PCI_LOG(WARNING, "Some devices want 'VA' but IOMMU does not support 'VA'.");
			PCI_LOG(WARNING, "The devices that want 'VA' won't initialize.");
		}
	} else if (devices_want_va && !devices_want_pa) {
		iova_mode = RTE_IOVA_VA;
	} else if (devices_want_pa && !devices_want_va) {
		iova_mode = RTE_IOVA_PA;
	} else {
		iova_mode = RTE_IOVA_DC;
		if (devices_want_va) {
			PCI_LOG(WARNING, "Some devices want 'VA' but forcing 'DC' because other devices want 'PA'.");
			PCI_LOG(WARNING, "Depending on the final decision by the EAL, not all devices may be able to initialize.");
		}
	}
	return iova_mode;
}

bool
rte_pci_has_capability_list(const struct rte_pci_device *dev)
{
	uint16_t status;

	if (rte_pci_read_config(dev, &status, sizeof(status), RTE_PCI_STATUS) != sizeof(status))
		return false;

	return (status & RTE_PCI_STATUS_CAP_LIST) != 0;
}

off_t
rte_pci_find_capability(const struct rte_pci_device *dev, uint8_t cap)
{
	return rte_pci_find_next_capability(dev, cap, 0);
}
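
/*
 * Illustrative sketch only: locating the MSI-X capability of a device
 * (0x11 is the capability ID assigned by the PCI specification) and
 * reading its message control register, which sits two bytes after the
 * capability header.
 *
 *	off_t msix = rte_pci_find_capability(dev, 0x11);
 *	uint16_t msgctl;
 *
 *	if (msix > 0 &&
 *	    rte_pci_read_config(dev, &msgctl, sizeof(msgctl),
 *			msix + 2) == sizeof(msgctl))
 *		... inspect msgctl ...
 */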

off_t
rte_pci_find_next_capability(const struct rte_pci_device *dev, uint8_t cap,
	off_t offset)
{
	uint8_t pos;
	int ttl;

	if (offset == 0)
		offset = RTE_PCI_CAPABILITY_LIST;
	else
		offset += RTE_PCI_CAP_NEXT;
	ttl = (RTE_PCI_CFG_SPACE_SIZE - RTE_PCI_STD_HEADER_SIZEOF) / RTE_PCI_CAP_SIZEOF;

	if (rte_pci_read_config(dev, &pos, sizeof(pos), offset) < 0)
		return -1;

	while (pos && ttl--) {
		uint16_t ent;
		uint8_t id;

		offset = pos;
		if (rte_pci_read_config(dev, &ent, sizeof(ent), offset) < 0)
			return -1;

		id = ent & 0xff;
		if (id == 0xff)
			break;

		if (id == cap)
			return offset;

		pos = (ent >> 8);
	}

	return 0;
}

off_t
rte_pci_find_ext_capability(const struct rte_pci_device *dev, uint32_t cap)
{
	off_t offset = RTE_PCI_CFG_SPACE_SIZE;
	uint32_t header;
	int ttl;

	/* minimum 8 bytes per capability */
	ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
		PCI_LOG(ERR, "error in reading extended capabilities");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl != 0) {
		if (RTE_PCI_EXT_CAP_ID(header) == cap)
			return offset;

		offset = RTE_PCI_EXT_CAP_NEXT(header);

		if (offset < RTE_PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
			PCI_LOG(ERR, "error in reading extended capabilities");
			return -1;
		}

		ttl--;
	}

	return 0;
}

int
rte_pci_set_bus_master(const struct rte_pci_device *dev, bool enable)
{
	uint16_t old_cmd, cmd;

	if (rte_pci_read_config(dev, &old_cmd, sizeof(old_cmd),
				RTE_PCI_COMMAND) < 0) {
		PCI_LOG(ERR, "error in reading PCI command register");
		return -1;
	}

	if (enable)
		cmd = old_cmd | RTE_PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~RTE_PCI_COMMAND_MASTER;

	if (cmd == old_cmd)
		return 0;

	if (rte_pci_write_config(dev, &cmd, sizeof(cmd),
				 RTE_PCI_COMMAND) < 0) {
		PCI_LOG(ERR, "error in writing PCI command register");
		return -1;
	}

	return 0;
}

int
rte_pci_pasid_set_state(const struct rte_pci_device *dev,
		off_t offset, bool enable)
{
	uint16_t pasid = enable;
	return rte_pci_write_config(dev, &pasid, sizeof(pasid),
		offset + RTE_PCI_PASID_CTRL) != sizeof(pasid) ? -1 : 0;
}

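/*
 * PCI bus operations registered with the EAL below via RTE_REGISTER_BUS:
 * scanning, probing, cleanup, hotplug and DMA mapping are all dispatched
 * through this structure.
 */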
struct rte_pci_bus rte_pci_bus = {
	.bus = {
		.scan = rte_pci_scan,
		.probe = pci_probe,
		.cleanup = pci_cleanup,
		.find_device = pci_find_device,
		.plug = pci_plug,
		.unplug = pci_unplug,
		.parse = pci_parse,
		.devargs_parse = rte_pci_devargs_parse,
		.dma_map = pci_dma_map,
		.dma_unmap = pci_dma_unmap,
		.get_iommu_class = rte_pci_get_iommu_class,
		.dev_iterate = rte_pci_dev_iterate,
		.hot_unplug_handler = pci_hot_unplug_handler,
		.sigbus_handler = pci_sigbus_handler,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
};

RTE_REGISTER_BUS(pci, rte_pci_bus.bus);
RTE_LOG_REGISTER_DEFAULT(pci_bus_logtype, NOTICE);