xref: /dpdk/drivers/bus/pci/bsd/pci.c (revision f9dfb59edbccae50e7c5508348aa2b4b84413048)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <fcntl.h>
15 #include <errno.h>
16 #include <dirent.h>
17 #include <limits.h>
18 #include <sys/queue.h>
19 #include <sys/mman.h>
20 #include <sys/ioctl.h>
21 #include <sys/pciio.h>
22 #include <dev/pci/pcireg.h>
23 
24 #if defined(RTE_ARCH_X86)
25 #include <machine/cpufunc.h>
26 #endif
27 
28 #include <rte_interrupts.h>
29 #include <rte_log.h>
30 #include <rte_pci.h>
31 #include <rte_common.h>
32 #include <rte_launch.h>
33 #include <rte_memory.h>
34 #include <rte_eal.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_malloc.h>
38 #include <rte_string_fns.h>
39 #include <rte_debug.h>
40 #include <rte_devargs.h>
41 
42 #include "eal_filesystem.h"
43 #include "private.h"
44 
45 /**
46  * @file
47  * PCI probing under BSD.
48  */
49 
50 /* Map pci device */
51 int
52 rte_pci_map_device(struct rte_pci_device *dev)
53 {
54 	int ret = -1;
55 
56 	/* try mapping the NIC resources */
57 	switch (dev->kdrv) {
58 	case RTE_PCI_KDRV_NIC_UIO:
59 		/* map resources for devices that use uio */
60 		ret = pci_uio_map_resource(dev);
61 		break;
62 	default:
63 		RTE_LOG(DEBUG, EAL,
64 			"  Not managed by a supported kernel driver, skipped\n");
65 		ret = 1;
66 		break;
67 	}
68 
69 	return ret;
70 }
71 
72 /* Unmap pci device */
73 void
74 rte_pci_unmap_device(struct rte_pci_device *dev)
75 {
76 	/* try unmapping the NIC resources */
77 	switch (dev->kdrv) {
78 	case RTE_PCI_KDRV_NIC_UIO:
79 		/* unmap resources for devices that use uio */
80 		pci_uio_unmap_resource(dev);
81 		break;
82 	default:
83 		RTE_LOG(DEBUG, EAL,
84 			"  Not managed by a supported kernel driver, skipped\n");
85 		break;
86 	}
87 }
88 
89 void
90 pci_uio_free_resource(struct rte_pci_device *dev,
91 		struct mapped_pci_resource *uio_res)
92 {
93 	rte_free(uio_res);
94 
95 	if (rte_intr_fd_get(dev->intr_handle)) {
96 		close(rte_intr_fd_get(dev->intr_handle));
97 		rte_intr_fd_set(dev->intr_handle, -1);
98 		rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN);
99 	}
100 }
101 
102 int
103 pci_uio_alloc_resource(struct rte_pci_device *dev,
104 		struct mapped_pci_resource **uio_res)
105 {
106 	char devname[PATH_MAX]; /* contains the /dev/uioX */
107 	struct rte_pci_addr *loc;
108 
109 	loc = &dev->addr;
110 
111 	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
112 			dev->addr.bus, dev->addr.devid, dev->addr.function);
113 
114 	if (access(devname, O_RDWR) < 0) {
115 		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
116 				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
117 		return 1;
118 	}
119 
120 	/* save fd if in primary process */
121 	if (rte_intr_fd_set(dev->intr_handle, open(devname, O_RDWR))) {
122 		RTE_LOG(WARNING, EAL, "Failed to save fd");
123 		goto error;
124 	}
125 
126 	if (rte_intr_fd_get(dev->intr_handle) < 0) {
127 		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
128 			devname, strerror(errno));
129 		goto error;
130 	}
131 
132 	if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UIO))
133 		goto error;
134 
135 	/* allocate the mapping details for secondary processes*/
136 	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
137 	if (*uio_res == NULL) {
138 		RTE_LOG(ERR, EAL,
139 			"%s(): cannot store uio mmap details\n", __func__);
140 		goto error;
141 	}
142 
143 	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
144 	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
145 
146 	return 0;
147 
148 error:
149 	pci_uio_free_resource(dev, *uio_res);
150 	return -1;
151 }
152 
/*
 * Map one memory BAR (res_idx) of a PCI device through its uio device
 * node and record the mapping in uio_res->maps[map_idx] so that
 * secondary processes can reproduce it.
 *
 * Returns 0 on success, -1 on allocation, open or mmap failure.
 */
int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;	/* path of the uio device node (uio_res->path) */
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		/* NOTE(review): rte_malloc() is not documented to set errno,
		 * so strerror(errno) may report a stale error here. */
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
				strerror(errno));
		return -1;
	}

	/*
	 * open resource file, to mmap it
	 */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
				devname, strerror(errno));
		goto error;
	}

	/* if matching map is found, then use it */
	/* BAR res_idx is exposed at offset res_idx * page_size within the
	 * uio node (one page per BAR index — nic_uio layout; TODO confirm) */
	offset = res_idx * pagesz;
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	/* descriptor is no longer needed once the mapping exists */
	close(fd);
	if (mapaddr == NULL)
		goto error;

	/* record everything a secondary process needs to re-map this BAR */
	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	/* destination buffer was sized strlen(devname) + 1 above */
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}
207 
/*
 * Build an rte_pci_device from one pci_conf entry returned by the
 * PCIOCGETCONF ioctl and add it to the PCI bus device list, keeping
 * the list sorted by PCI address.  dev_pci_fd is an open /dev/pci
 * descriptor, used here for the PCIOCGETBAR ioctl.
 *
 * Returns 0 on success (including when the device is skipped or was
 * already registered), -1 if the device structure cannot be allocated.
 */
static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL) {
		return -1;
	}

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;

	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id: 24-bit value, class/subclass/prog-if high to low */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = SOCKET_ID_ANY;

	pci_common_set(dev);

	/* FreeBSD has only one pass through driver */
	dev->kdrv = RTE_PCI_KDRV_NIC_UIO;

	/* parse resources: the header type determines how many BARs exist */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		/* unknown header type: not a device we can use */
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		/* a failed PCIOCGETBAR just means this BAR is unimplemented */
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			/* I/O BAR: store the port base (low 4 flag bits
			 * masked off) in addr, used by rte_pci_ioport_map() */
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		/* memory BAR: record the physical address, mapped later */
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	}
	else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				/* first entry sorting after dev: insert here */
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered: refresh existing entry */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_common_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				pci_free(dev);
			}
			return 0;
		}
		/* dev sorts after every existing entry: append at the tail */
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	pci_free(dev);
	return 0;
}
319 
/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 *
 * Returns 0 on success (including when PCI is disabled), -1 if
 * /dev/pci cannot be opened or queried.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	/* PCIOCGETCONF fills at most 16 entries per call; the do/while
	 * below repeats while the kernel reports more devices remain. */
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
			.pat_buf_len = 0,
			.num_patterns = 0,
			.patterns = NULL,	/* no pattern: match every device */
			.match_buf_len = sizeof(matches),
			.matches = &matches[0],
	};
	struct rte_pci_addr pci_addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	do {
		unsigned i;
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
					__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++) {
			pci_addr.domain = matches[i].pc_sel.pc_domain;
			pci_addr.bus = matches[i].pc_sel.pc_bus;
			pci_addr.devid = matches[i].pc_sel.pc_dev;
			pci_addr.function = matches[i].pc_sel.pc_func;

			/* honor the user's device block/allow list */
			if (rte_pci_ignore_device(&pci_addr))
				continue;

			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;
		}

		/* NOTE(review): this count also includes devices skipped by
		 * rte_pci_ignore_device() above. */
		dev_count += conf_io.num_matches;
	} while(conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
383 
/* FreeBSD provides no IOMMU-backed VA support for PCI devices. */
bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
{
	return false;
}
389 
/*
 * Report the IOVA mode for a device.  Physical addressing is the only
 * mode this bus implementation supports, so RTE_IOVA_PA is always
 * returned; an unexpected kernel driver only triggers a debug log.
 */
enum rte_iova_mode
pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
		     const struct rte_pci_device *pdev)
{
	if (pdev->kdrv != RTE_PCI_KDRV_NIC_UIO)
		RTE_LOG(DEBUG, EAL, "Unsupported kernel driver? Defaulting to IOVA as 'PA'\n");

	return RTE_IOVA_PA;
}
399 
/*
 * Read PCI config space.
 *
 * Reads len bytes at offset through /dev/pci with the PCIOCREAD ioctl,
 * splitting the request into 4-, 2- and 1-byte accesses as the
 * remaining length allows.
 *
 * Returns len on success (mirroring the Linux implementation), -1 on
 * any open/ioctl failure.
 */
int rte_pci_read_config(const struct rte_pci_device *dev,
		void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	/* Copy Linux implementation's behaviour */
	const int return_len = len;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	while (len > 0) {
		/* widest access width that still fits the remaining length */
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		/* advance the destination and the config-space register */
		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	return return_len;

 error:
	if (fd >= 0)
		close(fd);
	return -1;
}
445 
446 /* Write PCI config space. */
447 int rte_pci_write_config(const struct rte_pci_device *dev,
448 		const void *buf, size_t len, off_t offset)
449 {
450 	int fd = -1;
451 
452 	struct pci_io pi = {
453 		.pi_sel = {
454 			.pc_domain = dev->addr.domain,
455 			.pc_bus = dev->addr.bus,
456 			.pc_dev = dev->addr.devid,
457 			.pc_func = dev->addr.function,
458 		},
459 		.pi_reg = offset,
460 		.pi_data = *(const uint32_t *)buf,
461 		.pi_width = len,
462 	};
463 
464 	if (len == 3 || len > sizeof(pi.pi_data)) {
465 		RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__);
466 		goto error;
467 	}
468 
469 	memcpy(&pi.pi_data, buf, len);
470 
471 	fd = open("/dev/pci", O_RDWR);
472 	if (fd < 0) {
473 		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
474 		goto error;
475 	}
476 
477 	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
478 		goto error;
479 
480 	close(fd);
481 	return 0;
482 
483  error:
484 	if (fd >= 0)
485 		close(fd);
486 	return -1;
487 }
488 
/*
 * Set up ioport access for one BAR of a device.
 *
 * On x86 with nic_uio, the I/O BAR port base was stored in
 * mem_resource[bar].addr during the bus scan (see the PCI_BAR_IO()
 * branch in pci_scan_one()); a value within the 16-bit x86 port range
 * is used directly as p->base.  Other architectures and other kernel
 * drivers always fail.
 *
 * Returns 0 on success, -1 on failure.
 */
int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		/* direct port I/O requires iopl(); bail out without it */
		if (rte_eal_iopl_init() != 0) {
			RTE_LOG(ERR, EAL, "%s(): insufficient ioport permissions for PCI device %s\n",
				__func__, dev->name);
			return -1;
		}
		/* an x86 I/O port number must fit in 16 bits */
		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	/* only bind the device to the ioport handle on success */
	if (!ret)
		p->dev = dev;

	return ret;
}
520 
/*
 * Read len bytes from the I/O port range starting at p->base + offset
 * into data, using the widest accesses the remaining length allows:
 * all 4-byte reads first, then at most one 2-byte and one 1-byte read.
 * No-op on non-x86 architectures.
 */
static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *dst = data;
	unsigned short port = p->base + offset;

	while (len >= 4) {
		*(uint32_t *)dst = inl(port);
		dst += 4;
		port += 4;
		len -= 4;
	}
	if (len >= 2) {
		*(uint16_t *)dst = inw(port);
		dst += 2;
		port += 2;
		len -= 2;
	}
	if (len == 1)
		*dst = inb(port);
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}
549 
550 void
551 rte_pci_ioport_read(struct rte_pci_ioport *p,
552 		void *data, size_t len, off_t offset)
553 {
554 	switch (p->dev->kdrv) {
555 	case RTE_PCI_KDRV_NIC_UIO:
556 		pci_uio_ioport_read(p, data, len, offset);
557 		break;
558 	default:
559 		break;
560 	}
561 }
562 
/*
 * Write len bytes from data to the I/O port range starting at
 * p->base + offset, using the widest accesses the remaining length
 * allows: all 4-byte writes first, then at most one 2-byte and one
 * 1-byte write.  No-op on non-x86 architectures.
 */
static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *src = data;
	unsigned short port = p->base + offset;

	while (len >= 4) {
		outl(port, *(const uint32_t *)src);
		src += 4;
		port += 4;
		len -= 4;
	}
	if (len >= 2) {
		outw(port, *(const uint16_t *)src);
		src += 2;
		port += 2;
		len -= 2;
	}
	if (len == 1)
		outb(port, *src);
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}
591 
592 void
593 rte_pci_ioport_write(struct rte_pci_ioport *p,
594 		const void *data, size_t len, off_t offset)
595 {
596 	switch (p->dev->kdrv) {
597 	case RTE_PCI_KDRV_NIC_UIO:
598 		pci_uio_ioport_write(p, data, len, offset);
599 		break;
600 	default:
601 		break;
602 	}
603 }
604 
/*
 * Release an ioport handle obtained from rte_pci_ioport_map().
 *
 * Direct port I/O on x86/nic_uio required no actual mapping, so there
 * is nothing to tear down; this only reports whether the handle's
 * driver kind is supported: 0 for nic_uio on x86, -1 otherwise.
 */
int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret;

	switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		ret = 0;	/* nothing was mapped, nothing to undo */
		break;
#endif
	default:
		ret = -1;
		break;
	}

	return ret;
}
623