xref: /dpdk/drivers/bus/pci/bsd/pci.c (revision c752998b5e2eb5c827ffbecc5bd03ea28b14314f)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under FreeBSD.
 *
 * This code simulates a PCI probe by reading device information from
 * /dev/pci (PCIOCGETCONF). Devices are expected to be bound to the
 * nic_uio(4) kernel module, a minimal pass-through driver that exposes
 * the PCI BARs of a device to applications and enables bus mastering.
 */

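/*
 * How devices get bound to nic_uio is outside the scope of this file.
 * As a purely illustrative example (the exact syntax is documented in the
 * DPDK FreeBSD getting started guide), devices can be handed to the driver
 * at boot time via a loader tunable such as:
 *
 *	hw.nic_uio.bdfs="2:0:0,2:0:1"
 *
 * after which they show up as /dev/uio@pci:2:0:0 etc. and are picked up
 * by the scan and mapping code below.
 */
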
extern struct rte_pci_bus rte_pci_bus;

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* map resources for devices that use uio */
		ret = pci_uio_map_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

void
pci_uio_free_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource *uio_res)
{
	rte_free(uio_res);

	if (dev->intr_handle.fd) {
		close(dev->intr_handle.fd);
		dev->intr_handle.fd = -1;
		dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	}
}

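/*
 * Open the /dev/uio@pci:bus:dev:func node that nic_uio creates for a bound
 * device and allocate the mapped_pci_resource bookkeeping that is later
 * shared with secondary processes. The open file descriptor is also kept
 * as the interrupt handle for the device.
 */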
int
pci_uio_alloc_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource **uio_res)
{
	char devname[PATH_MAX]; /* holds the /dev/uio@pci:b:d:f path */
	struct rte_pci_addr *loc;

	loc = &dev->addr;

	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
			dev->addr.bus, dev->addr.devid, dev->addr.function);

	/* access() takes R_OK/W_OK modes, not open() flags */
	if (access(devname, R_OK | W_OK) < 0) {
		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
		return 1;
	}

	/* open the device node and keep the fd as the interrupt handle */
	dev->intr_handle.fd = open(devname, O_RDWR);
	if (dev->intr_handle.fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}
	dev->intr_handle.type = RTE_INTR_HANDLE_UIO;

	/* allocate the mapping details for secondary processes */
	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
	if (*uio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		goto error;
	}

	snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));

	return 0;

error:
	pci_uio_free_resource(dev, *uio_res);
	return -1;
}

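/*
 * mmap one BAR of the device from its uio device node. The mmap offset
 * passed to the node encodes which resource is wanted: BAR res_idx is
 * mapped at offset res_idx * page_size, using the length reported during
 * the bus scan.
 */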
int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
				strerror(errno));
		return -1;
	}

	/*
	 * open resource file, to mmap it
	 */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
				devname, strerror(errno));
		goto error;
	}

	/* the offset in the device node selects the BAR: one page per index */
	offset = res_idx * pagesz;
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	close(fd);
	if (mapaddr == MAP_FAILED)
		goto error;

	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}

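/*
 * Convert one pci_conf entry returned by the PCIOCGETCONF ioctl into an
 * rte_pci_device and insert it into the global device list, keeping the
 * list sorted by PCI address. A device that is already in the list is
 * updated in place.
 */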
static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL) {
		return -1;
	}

	memset(dev, 0, sizeof(*dev));
	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = 0;

	pci_name_set(dev);

	/* FreeBSD has only one pass-through driver */
	dev->kdrv = RTE_KDRV_NIC_UIO;

	/* parse resources */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	free(dev);
	return 0;
}

/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
			.pat_buf_len = 0,
			.num_patterns = 0,
			.patterns = NULL,
			.match_buf_len = sizeof(matches),
			.matches = &matches[0],
	};

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	do {
		unsigned i;
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
					__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++)
			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;

		dev_count += conf_io.num_matches;
	} while (conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

/*
 * Get iommu class of PCI devices on the bus.
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	/* nic_uio is the only supported driver and it is not IOMMU-aware,
	 * so the bus can only use physical addresses for IOVA.
	 */
	return RTE_IOVA_PA;
}

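/*
 * Re-read the configuration of a single device, identified by its PCI
 * address, and refresh (or add) the corresponding entry in the device
 * list. Typically used when a device is attached after the initial scan.
 */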
int
pci_update_device(const struct rte_pci_addr *addr)
{
	int fd;
	struct pci_conf matches[2];
	struct pci_match_conf match = {
		.pc_sel = {
			.pc_domain = addr->domain,
			.pc_bus = addr->bus,
			.pc_dev = addr->devid,
			.pc_func = addr->function,
		},
	};
	struct pci_conf_io conf_io = {
		/* pat_buf_len must cover the supplied pattern buffer */
		.pat_buf_len = sizeof(match),
		.num_patterns = 1,
		.patterns = &match,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
		RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
				__func__, strerror(errno));
		goto error;
	}

	if (conf_io.num_matches != 1)
		goto error;

	if (pci_scan_one(fd, &matches[0]) < 0)
		goto error;

	close(fd);

	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

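/*
 * rte_pci_read_config() and rte_pci_write_config() below go through
 * /dev/pci (PCIOCREAD/PCIOCWRITE). A minimal, purely illustrative usage
 * sketch from a driver probe routine might look like:
 *
 *	uint16_t vendor;
 *
 *	if (rte_pci_read_config(dev, &vendor, sizeof(vendor),
 *			PCIR_VENDOR) < 0)
 *		return -1;
 */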
/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *dev,
		void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	while (len > 0) {
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

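/*
 * Config space writes are limited to a single 1, 2 or 4 byte access; the
 * requested length is validated against the width of pi_data before the
 * PCIOCWRITE ioctl is issued.
 */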
/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *dev,
		const void *buf, size_t len, off_t offset)
{
	int fd = -1;

	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
		.pi_width = len,
	};

	if (len == 3 || len > sizeof(pi.pi_data)) {
		RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
		goto error;
	}

	/* copy the data only after the length has been validated */
	memcpy(&pi.pi_data, buf, len);

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
		goto error;

	close(fd);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

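/*
 * I/O port BARs are accessed directly with in/out instructions on x86,
 * so "mapping" an ioport simply records the port base that pci_scan_one()
 * stored in mem_resource[].addr for I/O BARs.
 */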
int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

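/*
 * Split an ioport read into the widest accesses the remaining length
 * allows (4, 2 or 1 bytes) and issue inl/inw/inb accordingly. Only
 * implemented for x86; on other architectures this is a no-op.
 */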
static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *d;
	int size;
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	default:
		break;
	}
}

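/*
 * Mirror image of pci_uio_ioport_read(): split the buffer into 4/2/1 byte
 * chunks and write them with outl/outw/outb. Only implemented for x86.
 */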
static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret;

	switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
		ret = 0;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	return ret;
}