/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under BSD
 *
 * This code scans the PCI bus by querying /dev/pci with the PCIOCGETCONF
 * ioctl and registers every device found on the PCI bus driver's device
 * list. Devices meant to be used by DPDK must be bound to the nic_uio
 * kernel module, a minimal userland-I/O driver that exposes a device's
 * PCI BARs to applications through a /dev/uio@pci:bus:device:function
 * node.
 */

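/*
 * Typical FreeBSD setup before a DPDK application can use a device (a
 * sketch; see the DPDK FreeBSD Getting Started Guide for the
 * authoritative steps):
 *
 *   kldload contigmem
 *   kldload nic_uio
 *
 * with the devices to bind listed in the hw.nic_uio.bdfs loader
 * tunable, e.g. hw.nic_uio.bdfs="2:0:0,2:0:1" in /boot/loader.conf.
 */
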
extern struct rte_pci_bus rte_pci_bus;

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* map resources for devices that use uio */
		ret = pci_uio_map_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

void
pci_uio_free_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource *uio_res)
{
	rte_free(uio_res);

	if (dev->intr_handle.fd >= 0) {
		close(dev->intr_handle.fd);
		dev->intr_handle.fd = -1;
		dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	}
}

int
pci_uio_alloc_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource **uio_res)
{
	char devname[PATH_MAX]; /* holds the /dev/uio@pci:b:d:f path */
	struct rte_pci_addr *loc;

	loc = &dev->addr;

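	/* nic_uio creates one /dev/uio@pci:bus:device:function node for each
	 * device bound to it */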
	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
			dev->addr.bus, dev->addr.devid, dev->addr.function);

	if (access(devname, R_OK | W_OK) < 0) {
		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
		return 1;
	}

	/* open the uio node and keep the fd for the interrupt handle */
	dev->intr_handle.fd = open(devname, O_RDWR);
	if (dev->intr_handle.fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}
	dev->intr_handle.type = RTE_INTR_HANDLE_UIO;

	/* allocate the mapping details for secondary processes */
	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
	if (*uio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		goto error;
	}

	snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));

	return 0;

error:
	pci_uio_free_resource(dev, *uio_res);
	return -1;
}

int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
				strerror(errno));
		return -1;
	}

	/* open the uio device node so the BAR can be mmap'd */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
				devname, strerror(errno));
		goto error;
	}

	/* the mmap offset tells the uio driver which resource to map
	 * (one page-sized slot per resource index) */
	offset = res_idx * pagesz;
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	close(fd);
	if (mapaddr == MAP_FAILED)
		goto error;

	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}

static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL) {
		return -1;
	}

	memset(dev, 0, sizeof(*dev));
	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = 0;

	pci_name_set(dev);

	/* FreeBSD has only one pass-through driver */
	dev->kdrv = RTE_KDRV_NIC_UIO;

	/* parse resources */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

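		/*
		 * I/O port BARs: keep the (masked) port base in addr so that
		 * rte_pci_ioport_map() can use it directly. Memory BARs: only
		 * record the physical address here; the virtual mapping is
		 * set up later through the uio device node.
		 */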
		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	free(dev);
	return 0;
}

/*
 * Scan the contents of the PCI bus and add the devices found to the
 * device list. Call pci_scan_one() for each pci entry found.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
			.pat_buf_len = 0,
			.num_patterns = 0,
			.patterns = NULL,
			.match_buf_len = sizeof(matches),
			.matches = &matches[0],
	};

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

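	/*
	 * Each PCIOCGETCONF call returns at most 16 matches (the size of the
	 * buffer passed in); the kernel records its position in conf_io, so
	 * repeating the ioctl continues the walk until status stops being
	 * PCI_GETCONF_MORE_DEVS.
	 */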
	do {
		unsigned i;
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
					__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++)
			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;

		dev_count += conf_io.num_matches;
	} while (conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

/*
 * Get iommu class of PCI devices on the bus.
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	/* Only RTE_KDRV_NIC_UIO is supported, and it does not use the IOMMU,
	 * so only physical addresses can be used. */
	return RTE_IOVA_PA;
}

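/*
 * Refresh the information of a single device by querying /dev/pci;
 * pci_scan_one() updates the existing entry in the device list if the
 * device is already known, or adds it otherwise.
 */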
int
pci_update_device(const struct rte_pci_addr *addr)
{
	int fd;
	struct pci_conf matches[2];
	struct pci_match_conf match = {
		.pc_sel = {
			.pc_domain = addr->domain,
			.pc_bus = addr->bus,
			.pc_dev = addr->devid,
			.pc_func = addr->function,
		},
	};
	struct pci_conf_io conf_io = {
		.pat_buf_len = sizeof(match),
		.num_patterns = 1,
		.patterns = &match,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
		RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
				__func__, strerror(errno));
		goto error;
	}

	if (conf_io.num_matches != 1)
		goto error;

	if (pci_scan_one(fd, &matches[0]) < 0)
		goto error;

	close(fd);

	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *dev,
		void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

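	/*
	 * PCIOCREAD transfers 1, 2 or 4 bytes at a time, so split the request
	 * into the widest accesses that fit the remaining length.
	 */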
	while (len > 0) {
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *dev,
		const void *buf, size_t len, off_t offset)
{
	int fd = -1;

	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
		.pi_width = len,
	};

	if (len == 3 || len > sizeof(pi.pi_data)) {
		RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
		goto error;
	}

	memcpy(&pi.pi_data, buf, len);

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
		goto error;

	close(fd);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
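		/*
		 * pci_scan_one() stored the I/O BAR's port base in
		 * mem_resource[bar].addr; x86 port numbers are 16 bits wide,
		 * so anything larger cannot be a valid port base.
		 */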
		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
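	/*
	 * Port I/O on x86 uses the inb/inw/inl helpers from
	 * <machine/cpufunc.h>; the transfer is split into the widest
	 * accesses that fit the remaining length.
	 */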
	uint8_t *d;
	int size;
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	default:
		break;
	}
}

static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret;

	switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
		ret = 0;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	return ret;
}