/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under FreeBSD.
 *
 * This code scans the PCI bus through the /dev/pci device and its
 * PCIOCGETCONF/PCIOCGETBAR ioctls. Devices bound to the nic_uio
 * kernel driver are exposed to applications through /dev/uio@pci:b:d:f
 * nodes, which give access to the PCI BARs of the device.
 */
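
/*
 * Illustrative setup sketch (not part of this file's logic): before a DPDK
 * application runs on FreeBSD, target devices are typically bound to the
 * nic_uio driver, e.g. via /boot/loader.conf. The tunable names below come
 * from the DPDK FreeBSD getting started guide and may differ per release:
 *
 *	contigmem_load="YES"
 *	nic_uio_load="YES"
 *	hw.nic_uio.bdfs="2:0:0,2:0:1"
 */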

extern struct rte_pci_bus rte_pci_bus;

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* map resources for devices that use uio */
		ret = pci_uio_map_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

void
pci_uio_free_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource *uio_res)
{
	rte_free(uio_res);

	if (dev->intr_handle.fd >= 0) {
		close(dev->intr_handle.fd);
		dev->intr_handle.fd = -1;
		dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	}
}

int
pci_uio_alloc_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource **uio_res)
{
	char devname[PATH_MAX]; /* contains the /dev/uio@pci device node name */
	struct rte_pci_addr *loc;

	loc = &dev->addr;

	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
			dev->addr.bus, dev->addr.devid, dev->addr.function);

	if (access(devname, R_OK | W_OK) < 0) {
		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
		return 1;
	}

	/* save fd if in primary process */
	dev->intr_handle.fd = open(devname, O_RDWR);
	if (dev->intr_handle.fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}
	dev->intr_handle.type = RTE_INTR_HANDLE_UIO;

	/* allocate the mapping details for secondary processes */
	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
	if (*uio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		goto error;
	}

	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));

	return 0;

error:
	pci_uio_free_resource(dev, *uio_res);
	return -1;
}
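
/*
 * Example (illustrative): for a device at PCI address 0000:02:00.0 the
 * device node opened above is "/dev/uio@pci:2:0:0".
 */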

int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
				strerror(errno));
		return -1;
	}

	/*
	 * open resource file, to mmap it
	 */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
				devname, strerror(errno));
		goto error;
	}

	/*
	 * The BAR index is passed to the uio driver through the mmap offset:
	 * mapping at offset res_idx * page_size selects BAR res_idx.
	 */
	offset = res_idx * pagesz;
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	close(fd);
	if (mapaddr == MAP_FAILED)
		goto error;

	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}
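
/*
 * Sketch of how a driver can use the mapping created above (the register
 * offset 0x08 is purely hypothetical):
 *
 *	volatile uint32_t *bar0 = dev->mem_resource[0].addr;
 *	uint32_t status = bar0[0x08 / sizeof(uint32_t)];
 */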

static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;

	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = 0;

	pci_name_set(dev);

	/* FreeBSD has only one pass-through driver (nic_uio) */
	dev->kdrv = RTE_KDRV_NIC_UIO;

	/* parse resources */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	free(dev);
	return 0;
}

/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
			.pat_buf_len = 0,
			.num_patterns = 0,
			.patterns = NULL,
			.match_buf_len = sizeof(matches),
			.matches = &matches[0],
	};

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	do {
		unsigned i;
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
					__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++)
			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;

		dev_count += conf_io.num_matches;
	} while (conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
{
	return false;
}

enum rte_iova_mode
pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
		     const struct rte_pci_device *pdev)
{
	/* Supports only RTE_KDRV_NIC_UIO */
	if (pdev->kdrv != RTE_KDRV_NIC_UIO)
		RTE_LOG(DEBUG, EAL, "Unsupported kernel driver? Defaulting to IOVA as 'PA'\n");

	return RTE_IOVA_PA;
}

int
pci_update_device(const struct rte_pci_addr *addr)
{
	int fd;
	struct pci_conf matches[2];
	struct pci_match_conf match = {
		.pc_sel = {
			.pc_domain = addr->domain,
			.pc_bus = addr->bus,
			.pc_dev = addr->devid,
			.pc_func = addr->function,
		},
	};
	struct pci_conf_io conf_io = {
		.pat_buf_len = 0,
		.num_patterns = 1,
		.patterns = &match,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
		RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
				__func__, strerror(errno));
		goto error;
	}

	if (conf_io.num_matches != 1)
		goto error;

	if (pci_scan_one(fd, &matches[0]) < 0)
		goto error;

	close(fd);

	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *dev,
		void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	/* Copy Linux implementation's behaviour */
	const int return_len = len;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	while (len > 0) {
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	return return_len;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
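
/*
 * Example usage (illustrative): read the 16-bit vendor ID at the start of
 * the configuration space; PCIR_VENDOR comes from <dev/pci/pcireg.h>.
 *
 *	uint16_t vendor;
 *	if (rte_pci_read_config(dev, &vendor, sizeof(vendor), PCIR_VENDOR) < 0)
 *		RTE_LOG(ERR, EAL, "cannot read vendor id\n");
 */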

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *dev,
		const void *buf, size_t len, off_t offset)
{
	int fd = -1;

	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
		.pi_width = len,
	};

	if (len == 3 || len > sizeof(pi.pi_data)) {
		RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
		goto error;
	}

	/* copy the value only after the length has been validated */
	memcpy(&pi.pi_data, buf, len);

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
		goto error;

	close(fd);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
		if ((uintptr_t)dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *d;
	int size;
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	default:
		break;
	}
}

static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	default:
		break;
	}
}
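
/*
 * Sketch of the ioport API built from the helpers above (BAR number and
 * register offset are hypothetical; only x86 with RTE_KDRV_NIC_UIO is
 * supported here):
 *
 *	struct rte_pci_ioport io;
 *	uint8_t val;
 *
 *	if (rte_pci_ioport_map(dev, 0, &io) == 0) {
 *		rte_pci_ioport_read(&io, &val, sizeof(val), 0);
 *		rte_pci_ioport_unmap(&io);
 *	}
 */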

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret;

	switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
		ret = 0;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	return ret;
}