xref: /dpdk/drivers/bus/pci/bsd/pci.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <fcntl.h>
15 #include <errno.h>
16 #include <dirent.h>
17 #include <limits.h>
18 #include <sys/queue.h>
19 #include <sys/mman.h>
20 #include <sys/ioctl.h>
21 #include <sys/pciio.h>
22 #include <dev/pci/pcireg.h>
23 
24 #if defined(RTE_ARCH_X86)
25 #include <machine/cpufunc.h>
26 #endif
27 
28 #include <rte_interrupts.h>
29 #include <rte_log.h>
30 #include <rte_pci.h>
31 #include <rte_bus_pci.h>
32 #include <rte_common.h>
33 #include <rte_launch.h>
34 #include <rte_memory.h>
35 #include <rte_eal.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_malloc.h>
39 #include <rte_string_fns.h>
40 #include <rte_debug.h>
41 #include <rte_devargs.h>
42 
43 #include "eal_filesystem.h"
44 #include "private.h"
45 
46 /**
47  * @file
48  * PCI probing under BSD.
49  */
50 
51 extern struct rte_pci_bus rte_pci_bus;
52 
53 /* Map pci device */
54 int
55 rte_pci_map_device(struct rte_pci_device *dev)
56 {
57 	int ret = -1;
58 
59 	/* try mapping the NIC resources */
60 	switch (dev->kdrv) {
61 	case RTE_PCI_KDRV_NIC_UIO:
62 		/* map resources for devices that use uio */
63 		ret = pci_uio_map_resource(dev);
64 		break;
65 	default:
66 		RTE_LOG(DEBUG, EAL,
67 			"  Not managed by a supported kernel driver, skipped\n");
68 		ret = 1;
69 		break;
70 	}
71 
72 	return ret;
73 }
74 
75 /* Unmap pci device */
76 void
77 rte_pci_unmap_device(struct rte_pci_device *dev)
78 {
79 	/* try unmapping the NIC resources */
80 	switch (dev->kdrv) {
81 	case RTE_PCI_KDRV_NIC_UIO:
82 		/* unmap resources for devices that use uio */
83 		pci_uio_unmap_resource(dev);
84 		break;
85 	default:
86 		RTE_LOG(DEBUG, EAL,
87 			"  Not managed by a supported kernel driver, skipped\n");
88 		break;
89 	}
90 }
91 
92 void
93 pci_uio_free_resource(struct rte_pci_device *dev,
94 		struct mapped_pci_resource *uio_res)
95 {
96 	rte_free(uio_res);
97 
98 	if (dev->intr_handle.fd) {
99 		close(dev->intr_handle.fd);
100 		dev->intr_handle.fd = -1;
101 		dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
102 	}
103 }
104 
105 int
106 pci_uio_alloc_resource(struct rte_pci_device *dev,
107 		struct mapped_pci_resource **uio_res)
108 {
109 	char devname[PATH_MAX]; /* contains the /dev/uioX */
110 	struct rte_pci_addr *loc;
111 
112 	loc = &dev->addr;
113 
114 	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
115 			dev->addr.bus, dev->addr.devid, dev->addr.function);
116 
117 	if (access(devname, O_RDWR) < 0) {
118 		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
119 				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
120 		return 1;
121 	}
122 
123 	/* save fd if in primary process */
124 	dev->intr_handle.fd = open(devname, O_RDWR);
125 	if (dev->intr_handle.fd < 0) {
126 		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
127 			devname, strerror(errno));
128 		goto error;
129 	}
130 	dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
131 
132 	/* allocate the mapping details for secondary processes*/
133 	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
134 	if (*uio_res == NULL) {
135 		RTE_LOG(ERR, EAL,
136 			"%s(): cannot store uio mmap details\n", __func__);
137 		goto error;
138 	}
139 
140 	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
141 	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
142 
143 	return 0;
144 
145 error:
146 	pci_uio_free_resource(dev, *uio_res);
147 	return -1;
148 }
149 
150 int
151 pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
152 		struct mapped_pci_resource *uio_res, int map_idx)
153 {
154 	int fd;
155 	char *devname;
156 	void *mapaddr;
157 	uint64_t offset;
158 	uint64_t pagesz;
159 	struct pci_map *maps;
160 
161 	maps = uio_res->maps;
162 	devname = uio_res->path;
163 	pagesz = sysconf(_SC_PAGESIZE);
164 
165 	/* allocate memory to keep path */
166 	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
167 	if (maps[map_idx].path == NULL) {
168 		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
169 				strerror(errno));
170 		return -1;
171 	}
172 
173 	/*
174 	 * open resource file, to mmap it
175 	 */
176 	fd = open(devname, O_RDWR);
177 	if (fd < 0) {
178 		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
179 				devname, strerror(errno));
180 		goto error;
181 	}
182 
183 	/* if matching map is found, then use it */
184 	offset = res_idx * pagesz;
185 	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
186 			(size_t)dev->mem_resource[res_idx].len, 0);
187 	close(fd);
188 	if (mapaddr == NULL)
189 		goto error;
190 
191 	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
192 	maps[map_idx].size = dev->mem_resource[res_idx].len;
193 	maps[map_idx].addr = mapaddr;
194 	maps[map_idx].offset = offset;
195 	strcpy(maps[map_idx].path, devname);
196 	dev->mem_resource[res_idx].addr = mapaddr;
197 
198 	return 0;
199 
200 error:
201 	rte_free(maps[map_idx].path);
202 	return -1;
203 }
204 
/*
 * Build a rte_pci_device from one PCIOCGETCONF match and add it to the
 * bus device list, keeping the list sorted by PCI address.
 *
 * @param dev_pci_fd
 *   Open descriptor on /dev/pci, used for the PCIOCGETBAR ioctls.
 * @param conf
 *   One device entry as returned by the PCIOCGETCONF ioctl.
 * @return
 *   0 on success (including "header type skipped" and "already registered"),
 *   -1 on allocation failure.
 */
static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL) {
		return -1;
	}

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;

	/* PCI address (domain/bus/device/function) from the kernel selector */
	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id: class, subclass and prog-if packed into 24 bits */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = 0;

	pci_name_set(dev);

	/* FreeBSD has only one pass through driver */
	dev->kdrv = RTE_PCI_KDRV_NIC_UIO;

	/* parse resources: the header type determines how many BARs exist */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		/* a failing PCIOCGETBAR just means this BAR is unimplemented */
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			/* I/O port BAR: mask off the low 4 flag bits */
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		/* memory BAR: record the physical address, flag bits masked */
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	}
	else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				/* first entry with a larger address: insert before it */
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}
		/* every existing entry compared smaller: append at the tail */
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	free(dev);
	return 0;
}
316 
317 /*
318  * Scan the content of the PCI bus, and add the devices in the devices
319  * list. Call pci_scan_one() for each pci entry found.
320  */
321 int
322 rte_pci_scan(void)
323 {
324 	int fd;
325 	unsigned dev_count = 0;
326 	struct pci_conf matches[16];
327 	struct pci_conf_io conf_io = {
328 			.pat_buf_len = 0,
329 			.num_patterns = 0,
330 			.patterns = NULL,
331 			.match_buf_len = sizeof(matches),
332 			.matches = &matches[0],
333 	};
334 	struct rte_pci_addr pci_addr;
335 
336 	/* for debug purposes, PCI can be disabled */
337 	if (!rte_eal_has_pci())
338 		return 0;
339 
340 	fd = open("/dev/pci", O_RDONLY);
341 	if (fd < 0) {
342 		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
343 		goto error;
344 	}
345 
346 	do {
347 		unsigned i;
348 		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
349 			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
350 					__func__, strerror(errno));
351 			goto error;
352 		}
353 
354 		for (i = 0; i < conf_io.num_matches; i++) {
355 			pci_addr.domain = matches[i].pc_sel.pc_domain;
356 			pci_addr.bus = matches[i].pc_sel.pc_bus;
357 			pci_addr.devid = matches[i].pc_sel.pc_dev;
358 			pci_addr.function = matches[i].pc_sel.pc_func;
359 
360 			if (rte_pci_ignore_device(&pci_addr))
361 				continue;
362 
363 			if (pci_scan_one(fd, &matches[i]) < 0)
364 				goto error;
365 		}
366 
367 		dev_count += conf_io.num_matches;
368 	} while(conf_io.status == PCI_GETCONF_MORE_DEVS);
369 
370 	close(fd);
371 
372 	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
373 	return 0;
374 
375 error:
376 	if (fd >= 0)
377 		close(fd);
378 	return -1;
379 }
380 
381 bool
382 pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
383 {
384 	return false;
385 }
386 
387 enum rte_iova_mode
388 pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
389 		     const struct rte_pci_device *pdev)
390 {
391 	if (pdev->kdrv != RTE_PCI_KDRV_NIC_UIO)
392 		RTE_LOG(DEBUG, EAL, "Unsupported kernel driver? Defaulting to IOVA as 'PA'\n");
393 
394 	return RTE_IOVA_PA;
395 }
396 
397 /* Read PCI config space. */
398 int rte_pci_read_config(const struct rte_pci_device *dev,
399 		void *buf, size_t len, off_t offset)
400 {
401 	int fd = -1;
402 	int size;
403 	/* Copy Linux implementation's behaviour */
404 	const int return_len = len;
405 	struct pci_io pi = {
406 		.pi_sel = {
407 			.pc_domain = dev->addr.domain,
408 			.pc_bus = dev->addr.bus,
409 			.pc_dev = dev->addr.devid,
410 			.pc_func = dev->addr.function,
411 		},
412 		.pi_reg = offset,
413 	};
414 
415 	fd = open("/dev/pci", O_RDWR);
416 	if (fd < 0) {
417 		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
418 		goto error;
419 	}
420 
421 	while (len > 0) {
422 		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
423 		pi.pi_width = size;
424 
425 		if (ioctl(fd, PCIOCREAD, &pi) < 0)
426 			goto error;
427 		memcpy(buf, &pi.pi_data, size);
428 
429 		buf = (char *)buf + size;
430 		pi.pi_reg += size;
431 		len -= size;
432 	}
433 	close(fd);
434 
435 	return return_len;
436 
437  error:
438 	if (fd >= 0)
439 		close(fd);
440 	return -1;
441 }
442 
443 /* Write PCI config space. */
444 int rte_pci_write_config(const struct rte_pci_device *dev,
445 		const void *buf, size_t len, off_t offset)
446 {
447 	int fd = -1;
448 
449 	struct pci_io pi = {
450 		.pi_sel = {
451 			.pc_domain = dev->addr.domain,
452 			.pc_bus = dev->addr.bus,
453 			.pc_dev = dev->addr.devid,
454 			.pc_func = dev->addr.function,
455 		},
456 		.pi_reg = offset,
457 		.pi_data = *(const uint32_t *)buf,
458 		.pi_width = len,
459 	};
460 
461 	if (len == 3 || len > sizeof(pi.pi_data)) {
462 		RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__);
463 		goto error;
464 	}
465 
466 	memcpy(&pi.pi_data, buf, len);
467 
468 	fd = open("/dev/pci", O_RDWR);
469 	if (fd < 0) {
470 		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
471 		goto error;
472 	}
473 
474 	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
475 		goto error;
476 
477 	close(fd);
478 	return 0;
479 
480  error:
481 	if (fd >= 0)
482 		close(fd);
483 	return -1;
484 }
485 
486 int
487 rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
488 		struct rte_pci_ioport *p)
489 {
490 	int ret;
491 
492 	switch (dev->kdrv) {
493 #if defined(RTE_ARCH_X86)
494 	case RTE_PCI_KDRV_NIC_UIO:
495 		if (rte_eal_iopl_init() != 0) {
496 			RTE_LOG(ERR, EAL, "%s(): insufficient ioport permissions for PCI device %s\n",
497 				__func__, dev->name);
498 			return -1;
499 		}
500 		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
501 			p->base = (uintptr_t)dev->mem_resource[bar].addr;
502 			ret = 0;
503 		} else
504 			ret = -1;
505 		break;
506 #endif
507 	default:
508 		ret = -1;
509 		break;
510 	}
511 
512 	if (!ret)
513 		p->dev = dev;
514 
515 	return ret;
516 }
517 
518 static void
519 pci_uio_ioport_read(struct rte_pci_ioport *p,
520 		void *data, size_t len, off_t offset)
521 {
522 #if defined(RTE_ARCH_X86)
523 	uint8_t *d;
524 	int size;
525 	unsigned short reg = p->base + offset;
526 
527 	for (d = data; len > 0; d += size, reg += size, len -= size) {
528 		if (len >= 4) {
529 			size = 4;
530 			*(uint32_t *)d = inl(reg);
531 		} else if (len >= 2) {
532 			size = 2;
533 			*(uint16_t *)d = inw(reg);
534 		} else {
535 			size = 1;
536 			*d = inb(reg);
537 		}
538 	}
539 #else
540 	RTE_SET_USED(p);
541 	RTE_SET_USED(data);
542 	RTE_SET_USED(len);
543 	RTE_SET_USED(offset);
544 #endif
545 }
546 
547 void
548 rte_pci_ioport_read(struct rte_pci_ioport *p,
549 		void *data, size_t len, off_t offset)
550 {
551 	switch (p->dev->kdrv) {
552 	case RTE_PCI_KDRV_NIC_UIO:
553 		pci_uio_ioport_read(p, data, len, offset);
554 		break;
555 	default:
556 		break;
557 	}
558 }
559 
560 static void
561 pci_uio_ioport_write(struct rte_pci_ioport *p,
562 		const void *data, size_t len, off_t offset)
563 {
564 #if defined(RTE_ARCH_X86)
565 	const uint8_t *s;
566 	int size;
567 	unsigned short reg = p->base + offset;
568 
569 	for (s = data; len > 0; s += size, reg += size, len -= size) {
570 		if (len >= 4) {
571 			size = 4;
572 			outl(reg, *(const uint32_t *)s);
573 		} else if (len >= 2) {
574 			size = 2;
575 			outw(reg, *(const uint16_t *)s);
576 		} else {
577 			size = 1;
578 			outb(reg, *s);
579 		}
580 	}
581 #else
582 	RTE_SET_USED(p);
583 	RTE_SET_USED(data);
584 	RTE_SET_USED(len);
585 	RTE_SET_USED(offset);
586 #endif
587 }
588 
589 void
590 rte_pci_ioport_write(struct rte_pci_ioport *p,
591 		const void *data, size_t len, off_t offset)
592 {
593 	switch (p->dev->kdrv) {
594 	case RTE_PCI_KDRV_NIC_UIO:
595 		pci_uio_ioport_write(p, data, len, offset);
596 		break;
597 	default:
598 		break;
599 	}
600 }
601 
602 int
603 rte_pci_ioport_unmap(struct rte_pci_ioport *p)
604 {
605 	int ret;
606 
607 	switch (p->dev->kdrv) {
608 #if defined(RTE_ARCH_X86)
609 	case RTE_PCI_KDRV_NIC_UIO:
610 		ret = 0;
611 		break;
612 #endif
613 	default:
614 		ret = -1;
615 		break;
616 	}
617 
618 	return ret;
619 }
620