xref: /dpdk/drivers/bus/pci/bsd/pci.c (revision 095cf6e68b28605635a9edb5f01991ad98474c46)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <fcntl.h>
15 #include <errno.h>
16 #include <dirent.h>
17 #include <limits.h>
18 #include <sys/queue.h>
19 #include <sys/mman.h>
20 #include <sys/ioctl.h>
21 #include <sys/pciio.h>
22 #include <dev/pci/pcireg.h>
23 
24 #if defined(RTE_ARCH_X86)
25 #include <machine/cpufunc.h>
26 #endif
27 
28 #include <rte_interrupts.h>
29 #include <rte_log.h>
30 #include <rte_pci.h>
31 #include <rte_common.h>
32 #include <rte_launch.h>
33 #include <rte_memory.h>
34 #include <rte_eal.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_malloc.h>
38 #include <rte_string_fns.h>
39 #include <rte_debug.h>
40 #include <rte_devargs.h>
41 
42 #include "eal_filesystem.h"
43 #include "private.h"
44 
45 /**
46  * @file
47  * PCI probing under BSD.
48  */
49 
50 /* Map pci device */
51 int
52 rte_pci_map_device(struct rte_pci_device *dev)
53 {
54 	int ret = -1;
55 
56 	/* try mapping the NIC resources */
57 	switch (dev->kdrv) {
58 	case RTE_PCI_KDRV_NIC_UIO:
59 		/* map resources for devices that use uio */
60 		ret = pci_uio_map_resource(dev);
61 		break;
62 	default:
63 		RTE_LOG(DEBUG, EAL,
64 			"  Not managed by a supported kernel driver, skipped\n");
65 		ret = 1;
66 		break;
67 	}
68 
69 	return ret;
70 }
71 
72 /* Unmap pci device */
73 void
74 rte_pci_unmap_device(struct rte_pci_device *dev)
75 {
76 	/* try unmapping the NIC resources */
77 	switch (dev->kdrv) {
78 	case RTE_PCI_KDRV_NIC_UIO:
79 		/* unmap resources for devices that use uio */
80 		pci_uio_unmap_resource(dev);
81 		break;
82 	default:
83 		RTE_LOG(DEBUG, EAL,
84 			"  Not managed by a supported kernel driver, skipped\n");
85 		break;
86 	}
87 }
88 
89 void
90 pci_uio_free_resource(struct rte_pci_device *dev,
91 		struct mapped_pci_resource *uio_res)
92 {
93 	rte_free(uio_res);
94 
95 	if (rte_intr_fd_get(dev->intr_handle)) {
96 		close(rte_intr_fd_get(dev->intr_handle));
97 		rte_intr_fd_set(dev->intr_handle, -1);
98 		rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN);
99 	}
100 }
101 
102 int
103 pci_uio_alloc_resource(struct rte_pci_device *dev,
104 		struct mapped_pci_resource **uio_res)
105 {
106 	char devname[PATH_MAX]; /* contains the /dev/uioX */
107 	struct rte_pci_addr *loc;
108 
109 	loc = &dev->addr;
110 
111 	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
112 			dev->addr.bus, dev->addr.devid, dev->addr.function);
113 
114 	if (access(devname, O_RDWR) < 0) {
115 		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
116 				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
117 		return 1;
118 	}
119 
120 	/* save fd if in primary process */
121 	if (rte_intr_fd_set(dev->intr_handle, open(devname, O_RDWR))) {
122 		RTE_LOG(WARNING, EAL, "Failed to save fd");
123 		goto error;
124 	}
125 
126 	if (rte_intr_fd_get(dev->intr_handle) < 0) {
127 		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
128 			devname, strerror(errno));
129 		goto error;
130 	}
131 
132 	if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UIO))
133 		goto error;
134 
135 	/* allocate the mapping details for secondary processes*/
136 	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
137 	if (*uio_res == NULL) {
138 		RTE_LOG(ERR, EAL,
139 			"%s(): cannot store uio mmap details\n", __func__);
140 		goto error;
141 	}
142 
143 	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
144 	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
145 
146 	return 0;
147 
148 error:
149 	pci_uio_free_resource(dev, *uio_res);
150 	return -1;
151 }
152 
/*
 * Map BAR @res_idx of @dev into this process and record the mapping in
 * @uio_res->maps[@map_idx].
 *
 * The uio device node is mmap()ed at offset res_idx * page_size — the
 * nic_uio driver presumably exposes each BAR at a page-indexed offset;
 * TODO confirm against the kernel module.
 *
 * @return 0 on success, -1 on allocation/open/mmap failure (the path
 *         buffer allocated here is released on failure).
 */
int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		/* NOTE(review): rte_malloc() is not shown to set errno, so
		 * strerror(errno) here may report a stale error — verify. */
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
				strerror(errno));
		return -1;
	}

	/*
	 * open resource file, to mmap it
	 */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
				devname, strerror(errno));
		goto error;
	}

	/* if matching map is found, then use it */
	offset = res_idx * pagesz;
	/* NULL lets the kernel choose the virtual address; the fd can be
	 * closed immediately after mmap — the mapping remains valid. */
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	close(fd);
	if (mapaddr == NULL)
		goto error;

	/* record the mapping so secondary processes can reproduce it */
	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	/* safe: destination was allocated with strlen(devname) + 1 above */
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}
207 
/*
 * Build an rte_pci_device from one pci_conf record returned by the
 * PCIOCGETCONF ioctl and insert it, address-sorted, into the PCI bus
 * device list (or merge it into an already-registered entry).
 *
 * @param dev_pci_fd  open fd on /dev/pci, reused for PCIOCGETBAR
 * @param conf        one match record from the kernel
 * @return 0 on success or intentional skip, -1 on allocation failure
 */
static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device_internal *pdev;
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	pdev = malloc(sizeof(*pdev));
	if (pdev == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for internal pci device\n");
		return -1;
	}

	memset(pdev, 0, sizeof(*pdev));
	dev = &pdev->device;
	dev->device.bus = &rte_pci_bus.bus;

	/* copy the BDF address from the kernel's selector */
	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id: packed as class/subclass/prog-if, one byte each */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = SOCKET_ID_ANY;

	pci_common_set(dev);

	/* FreeBSD has only one pass through driver */
	dev->kdrv = RTE_PCI_KDRV_NIC_UIO;

	/* parse resources: the header type determines how many BARs exist */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		/* unknown header type: not a device we can drive */
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		/* unreadable BAR: leave this slot zeroed and move on */
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			/* I/O BAR: the port base (low flag bits masked)
			 * goes in .addr, not .phys_addr */
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		/* memory BAR: mask the low 4 flag bits off the base */
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	}
	else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		/* walk until the first entry not strictly before dev; the
		 * unconditional return below ends the scan at that point */
		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				/* insert before the first larger entry */
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				/* refresh the existing entry in place and
				 * discard the newly built one */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_common_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				pci_free(pdev);
			}
			return 0;
		}
		/* larger than every existing entry: append at the tail */
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	pci_free(pdev);
	return 0;
}
322 
/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	/* receive kernel config records in batches of 16 */
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
			.pat_buf_len = 0,
			.num_patterns = 0, /* no pattern: match every device */
			.patterns = NULL,
			.match_buf_len = sizeof(matches),
			.matches = &matches[0],
	};
	struct rte_pci_addr pci_addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	do {
		unsigned i;
		/* repeated PCIOCGETCONF calls page through the device list;
		 * the kernel presumably keeps its cursor in conf_io between
		 * calls — see pci(4) */
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
					__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++) {
			pci_addr.domain = matches[i].pc_sel.pc_domain;
			pci_addr.bus = matches[i].pc_sel.pc_bus;
			pci_addr.devid = matches[i].pc_sel.pc_dev;
			pci_addr.function = matches[i].pc_sel.pc_func;

			/* honour the EAL block/allow device filters */
			if (rte_pci_ignore_device(&pci_addr))
				continue;

			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;
		}

		dev_count += conf_io.num_matches;
	} while(conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	/* fd is < 0 only when open() itself failed */
	if (fd >= 0)
		close(fd);
	return -1;
}
386 
/*
 * IOVA-as-VA is never available on this platform (consistent with
 * pci_device_iova_mode() below, which always selects PA).
 */
bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
{
	return false;
}
392 
393 enum rte_iova_mode
394 pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
395 		     const struct rte_pci_device *pdev)
396 {
397 	if (pdev->kdrv != RTE_PCI_KDRV_NIC_UIO)
398 		RTE_LOG(DEBUG, EAL, "Unsupported kernel driver? Defaulting to IOVA as 'PA'\n");
399 
400 	return RTE_IOVA_PA;
401 }
402 
403 /* Read PCI config space. */
404 int rte_pci_read_config(const struct rte_pci_device *dev,
405 		void *buf, size_t len, off_t offset)
406 {
407 	int fd = -1;
408 	int size;
409 	/* Copy Linux implementation's behaviour */
410 	const int return_len = len;
411 	struct pci_io pi = {
412 		.pi_sel = {
413 			.pc_domain = dev->addr.domain,
414 			.pc_bus = dev->addr.bus,
415 			.pc_dev = dev->addr.devid,
416 			.pc_func = dev->addr.function,
417 		},
418 		.pi_reg = offset,
419 	};
420 
421 	fd = open("/dev/pci", O_RDWR);
422 	if (fd < 0) {
423 		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
424 		goto error;
425 	}
426 
427 	while (len > 0) {
428 		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
429 		pi.pi_width = size;
430 
431 		if (ioctl(fd, PCIOCREAD, &pi) < 0)
432 			goto error;
433 		memcpy(buf, &pi.pi_data, size);
434 
435 		buf = (char *)buf + size;
436 		pi.pi_reg += size;
437 		len -= size;
438 	}
439 	close(fd);
440 
441 	return return_len;
442 
443  error:
444 	if (fd >= 0)
445 		close(fd);
446 	return -1;
447 }
448 
449 /* Write PCI config space. */
450 int rte_pci_write_config(const struct rte_pci_device *dev,
451 		const void *buf, size_t len, off_t offset)
452 {
453 	int fd = -1;
454 
455 	struct pci_io pi = {
456 		.pi_sel = {
457 			.pc_domain = dev->addr.domain,
458 			.pc_bus = dev->addr.bus,
459 			.pc_dev = dev->addr.devid,
460 			.pc_func = dev->addr.function,
461 		},
462 		.pi_reg = offset,
463 		.pi_data = *(const uint32_t *)buf,
464 		.pi_width = len,
465 	};
466 
467 	if (len == 3 || len > sizeof(pi.pi_data)) {
468 		RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__);
469 		goto error;
470 	}
471 
472 	memcpy(&pi.pi_data, buf, len);
473 
474 	fd = open("/dev/pci", O_RDWR);
475 	if (fd < 0) {
476 		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
477 		goto error;
478 	}
479 
480 	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
481 		goto error;
482 
483 	close(fd);
484 	return 0;
485 
486  error:
487 	if (fd >= 0)
488 		close(fd);
489 	return -1;
490 }
491 
492 /* Read PCI MMIO space. */
493 int rte_pci_mmio_read(const struct rte_pci_device *dev, int bar,
494 		      void *buf, size_t len, off_t offset)
495 {
496 	if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
497 			(uint64_t)offset + len > dev->mem_resource[bar].len)
498 		return -1;
499 	memcpy(buf, (uint8_t *)dev->mem_resource[bar].addr + offset, len);
500 	return len;
501 }
502 
503 /* Write PCI MMIO space. */
504 int rte_pci_mmio_write(const struct rte_pci_device *dev, int bar,
505 		       const void *buf, size_t len, off_t offset)
506 {
507 	if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
508 			(uint64_t)offset + len > dev->mem_resource[bar].len)
509 		return -1;
510 	memcpy((uint8_t *)dev->mem_resource[bar].addr + offset, buf, len);
511 	return len;
512 }
513 
/*
 * Prepare ioport access for BAR @bar of @dev, filling @p on success.
 * On x86 this validates I/O privilege and records the port base; on
 * other architectures (or for unmanaged drivers) it fails with -1.
 */
int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		/* direct in/out instructions require I/O privilege level */
		if (rte_eal_iopl_init() != 0) {
			RTE_LOG(ERR, EAL, "%s(): insufficient ioport permissions for PCI device %s\n",
				__func__, dev->name);
			return -1;
		}
		/* The scan code stores an I/O BAR's port base in
		 * mem_resource[].addr; x86 port numbers are 16-bit, so any
		 * larger value cannot be a port and is rejected. */
		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	/* only publish the device pointer on success */
	if (!ret)
		p->dev = dev;

	return ret;
}
545 
546 static void
547 pci_uio_ioport_read(struct rte_pci_ioport *p,
548 		void *data, size_t len, off_t offset)
549 {
550 #if defined(RTE_ARCH_X86)
551 	uint8_t *d;
552 	int size;
553 	unsigned short reg = p->base + offset;
554 
555 	for (d = data; len > 0; d += size, reg += size, len -= size) {
556 		if (len >= 4) {
557 			size = 4;
558 			*(uint32_t *)d = inl(reg);
559 		} else if (len >= 2) {
560 			size = 2;
561 			*(uint16_t *)d = inw(reg);
562 		} else {
563 			size = 1;
564 			*d = inb(reg);
565 		}
566 	}
567 #else
568 	RTE_SET_USED(p);
569 	RTE_SET_USED(data);
570 	RTE_SET_USED(len);
571 	RTE_SET_USED(offset);
572 #endif
573 }
574 
575 void
576 rte_pci_ioport_read(struct rte_pci_ioport *p,
577 		void *data, size_t len, off_t offset)
578 {
579 	switch (p->dev->kdrv) {
580 	case RTE_PCI_KDRV_NIC_UIO:
581 		pci_uio_ioport_read(p, data, len, offset);
582 		break;
583 	default:
584 		break;
585 	}
586 }
587 
588 static void
589 pci_uio_ioport_write(struct rte_pci_ioport *p,
590 		const void *data, size_t len, off_t offset)
591 {
592 #if defined(RTE_ARCH_X86)
593 	const uint8_t *s;
594 	int size;
595 	unsigned short reg = p->base + offset;
596 
597 	for (s = data; len > 0; s += size, reg += size, len -= size) {
598 		if (len >= 4) {
599 			size = 4;
600 			outl(reg, *(const uint32_t *)s);
601 		} else if (len >= 2) {
602 			size = 2;
603 			outw(reg, *(const uint16_t *)s);
604 		} else {
605 			size = 1;
606 			outb(reg, *s);
607 		}
608 	}
609 #else
610 	RTE_SET_USED(p);
611 	RTE_SET_USED(data);
612 	RTE_SET_USED(len);
613 	RTE_SET_USED(offset);
614 #endif
615 }
616 
617 void
618 rte_pci_ioport_write(struct rte_pci_ioport *p,
619 		const void *data, size_t len, off_t offset)
620 {
621 	switch (p->dev->kdrv) {
622 	case RTE_PCI_KDRV_NIC_UIO:
623 		pci_uio_ioport_write(p, data, len, offset);
624 		break;
625 	default:
626 		break;
627 	}
628 }
629 
/*
 * Tear down an ioport mapping. Nothing was actually mapped by
 * rte_pci_ioport_map() on x86 (only a port base was recorded), so
 * success is unconditional for nic_uio devices there; every other
 * case fails with -1.
 */
int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret = -1;

#if defined(RTE_ARCH_X86)
	if (p->dev->kdrv == RTE_PCI_KDRV_NIC_UIO)
		ret = 0;
#else
	RTE_SET_USED(p);
#endif

	return ret;
}
648