xref: /dpdk/drivers/bus/pci/bsd/pci.c (revision 849f773b7645216954022a47e466043a23125af9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <fcntl.h>
15 #include <errno.h>
16 #include <dirent.h>
17 #include <limits.h>
18 #include <sys/queue.h>
19 #include <sys/mman.h>
20 #include <sys/ioctl.h>
21 #include <sys/pciio.h>
22 #include <dev/pci/pcireg.h>
23 
24 #if defined(RTE_ARCH_X86)
25 #include <machine/cpufunc.h>
26 #endif
27 
28 #include <rte_interrupts.h>
29 #include <rte_log.h>
30 #include <rte_pci.h>
31 #include <rte_common.h>
32 #include <rte_launch.h>
33 #include <rte_memory.h>
34 #include <rte_eal.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_malloc.h>
38 #include <rte_string_fns.h>
39 #include <rte_debug.h>
40 #include <rte_devargs.h>
41 
42 #include "eal_filesystem.h"
43 #include "private.h"
44 
45 /**
46  * @file
47  * PCI probing under BSD.
48  */
49 
50 /* Map pci device */
51 int
rte_pci_map_device(struct rte_pci_device * dev)52 rte_pci_map_device(struct rte_pci_device *dev)
53 {
54 	int ret = -1;
55 
56 	/* try mapping the NIC resources */
57 	switch (dev->kdrv) {
58 	case RTE_PCI_KDRV_NIC_UIO:
59 		/* map resources for devices that use uio */
60 		ret = pci_uio_map_resource(dev);
61 		break;
62 	default:
63 		PCI_LOG(DEBUG, "  Not managed by a supported kernel driver, skipped");
64 		ret = 1;
65 		break;
66 	}
67 
68 	return ret;
69 }
70 
71 /* Unmap pci device */
72 void
rte_pci_unmap_device(struct rte_pci_device * dev)73 rte_pci_unmap_device(struct rte_pci_device *dev)
74 {
75 	/* try unmapping the NIC resources */
76 	switch (dev->kdrv) {
77 	case RTE_PCI_KDRV_NIC_UIO:
78 		/* unmap resources for devices that use uio */
79 		pci_uio_unmap_resource(dev);
80 		break;
81 	default:
82 		PCI_LOG(DEBUG, "  Not managed by a supported kernel driver, skipped");
83 		break;
84 	}
85 }
86 
87 void
pci_uio_free_resource(struct rte_pci_device * dev,struct mapped_pci_resource * uio_res)88 pci_uio_free_resource(struct rte_pci_device *dev,
89 		struct mapped_pci_resource *uio_res)
90 {
91 	rte_free(uio_res);
92 
93 	if (rte_intr_fd_get(dev->intr_handle)) {
94 		close(rte_intr_fd_get(dev->intr_handle));
95 		rte_intr_fd_set(dev->intr_handle, -1);
96 		rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN);
97 	}
98 }
99 
100 int
pci_uio_alloc_resource(struct rte_pci_device * dev,struct mapped_pci_resource ** uio_res)101 pci_uio_alloc_resource(struct rte_pci_device *dev,
102 		struct mapped_pci_resource **uio_res)
103 {
104 	char devname[PATH_MAX]; /* contains the /dev/uioX */
105 	struct rte_pci_addr *loc;
106 
107 	loc = &dev->addr;
108 
109 	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
110 			dev->addr.bus, dev->addr.devid, dev->addr.function);
111 
112 	if (access(devname, O_RDWR) < 0) {
113 		PCI_LOG(WARNING, "  "PCI_PRI_FMT" not managed by UIO driver, skipping",
114 			loc->domain, loc->bus, loc->devid, loc->function);
115 		return 1;
116 	}
117 
118 	/* save fd if in primary process */
119 	if (rte_intr_fd_set(dev->intr_handle, open(devname, O_RDWR))) {
120 		PCI_LOG(WARNING, "Failed to save fd");
121 		goto error;
122 	}
123 
124 	if (rte_intr_fd_get(dev->intr_handle) < 0) {
125 		PCI_LOG(ERR, "Cannot open %s: %s", devname, strerror(errno));
126 		goto error;
127 	}
128 
129 	if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UIO))
130 		goto error;
131 
132 	/* allocate the mapping details for secondary processes*/
133 	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
134 	if (*uio_res == NULL) {
135 		PCI_LOG(ERR, "%s(): cannot store uio mmap details", __func__);
136 		goto error;
137 	}
138 
139 	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
140 	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
141 
142 	return 0;
143 
144 error:
145 	pci_uio_free_resource(dev, *uio_res);
146 	return -1;
147 }
148 
149 int
pci_uio_map_resource_by_index(struct rte_pci_device * dev,int res_idx,struct mapped_pci_resource * uio_res,int map_idx)150 pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
151 		struct mapped_pci_resource *uio_res, int map_idx)
152 {
153 	int fd;
154 	char *devname;
155 	void *mapaddr;
156 	uint64_t offset;
157 	uint64_t pagesz;
158 	struct pci_map *maps;
159 
160 	maps = uio_res->maps;
161 	devname = uio_res->path;
162 	pagesz = sysconf(_SC_PAGESIZE);
163 
164 	/* allocate memory to keep path */
165 	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
166 	if (maps[map_idx].path == NULL) {
167 		PCI_LOG(ERR, "Cannot allocate memory for path: %s", strerror(errno));
168 		return -1;
169 	}
170 
171 	/*
172 	 * open resource file, to mmap it
173 	 */
174 	fd = open(devname, O_RDWR);
175 	if (fd < 0) {
176 		PCI_LOG(ERR, "Cannot open %s: %s", devname, strerror(errno));
177 		goto error;
178 	}
179 
180 	/* if matching map is found, then use it */
181 	offset = res_idx * pagesz;
182 	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
183 			(size_t)dev->mem_resource[res_idx].len, 0);
184 	close(fd);
185 	if (mapaddr == NULL)
186 		goto error;
187 
188 	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
189 	maps[map_idx].size = dev->mem_resource[res_idx].len;
190 	maps[map_idx].addr = mapaddr;
191 	maps[map_idx].offset = offset;
192 	strcpy(maps[map_idx].path, devname);
193 	dev->mem_resource[res_idx].addr = mapaddr;
194 
195 	return 0;
196 
197 error:
198 	rte_free(maps[map_idx].path);
199 	return -1;
200 }
201 
/*
 * Build an rte_pci_device from one pci_conf record returned by the
 * PCIOCGETCONF ioctl on /dev/pci, read its BARs via PCIOCGETBAR, and
 * insert it into rte_pci_bus.device_list sorted by PCI address.
 *
 * Returns 0 on success (including devices that are skipped because of
 * an unknown header type); -1 only when allocation fails.
 */
static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device_internal *pdev;
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	pdev = malloc(sizeof(*pdev));
	if (pdev == NULL) {
		PCI_LOG(ERR, "Cannot allocate memory for internal pci device");
		return -1;
	}

	memset(pdev, 0, sizeof(*pdev));
	dev = &pdev->device;
	dev->device.bus = &rte_pci_bus.bus;

	/* device location (domain/bus/device/function) from the selector */
	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id, packed as 0xCCSSPP (class/subclass/prog-if) */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = SOCKET_ID_ANY;

	pci_common_set(dev);

	/* FreeBSD has only one pass through driver */
	dev->kdrv = RTE_PCI_KDRV_NIC_UIO;

	/* parse resources: the header type decides how many BARs exist */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		/* a failing PCIOCGETBAR just means this BAR is unimplemented */
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			/* I/O BAR: keep the port base (low 4 flag bits masked off) */
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		/* memory BAR: record the physical base address for later mapping */
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	}
	else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				/* first entry sorting after dev: insert before it */
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				/* refresh the existing entry in place and drop ours */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_common_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				pci_free(pdev);
			}
			return 0;
		}
		/* dev sorts after every existing entry: append at the tail */
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	pci_free(pdev);
	return 0;
}
316 
317 /*
318  * Scan the content of the PCI bus, and add the devices in the devices
319  * list. Call pci_scan_one() for each pci entry found.
320  */
/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 *
 * Returns 0 on success (also when PCI is disabled), -1 on failure.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	/* PCIOCGETCONF fills at most 16 entries per call; the do/while
	 * below keeps calling until the kernel reports no more devices. */
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
			.pat_buf_len = 0,
			.num_patterns = 0,
			.patterns = NULL,
			.match_buf_len = sizeof(matches),
			.matches = &matches[0],
	};
	struct rte_pci_addr pci_addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		PCI_LOG(ERR, "%s(): error opening /dev/pci", __func__);
		goto error;
	}

	do {
		unsigned i;
		/* the kernel resumes enumeration where the last call stopped */
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			PCI_LOG(ERR, "%s(): error with ioctl on /dev/pci: %s",
				__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++) {
			pci_addr.domain = matches[i].pc_sel.pc_domain;
			pci_addr.bus = matches[i].pc_sel.pc_bus;
			pci_addr.devid = matches[i].pc_sel.pc_dev;
			pci_addr.function = matches[i].pc_sel.pc_func;

			/* honor device block-list before registering */
			if (rte_pci_ignore_device(&pci_addr))
				continue;

			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;
		}

		dev_count += conf_io.num_matches;
	} while(conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	PCI_LOG(DEBUG, "PCI scan found %u devices", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
380 
381 bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device * dev)382 pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
383 {
384 	return false;
385 }
386 
387 enum rte_iova_mode
pci_device_iova_mode(const struct rte_pci_driver * pdrv __rte_unused,const struct rte_pci_device * pdev)388 pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
389 		     const struct rte_pci_device *pdev)
390 {
391 	if (pdev->kdrv != RTE_PCI_KDRV_NIC_UIO)
392 		PCI_LOG(DEBUG, "Unsupported kernel driver? Defaulting to IOVA as 'PA'");
393 
394 	return RTE_IOVA_PA;
395 }
396 
/* Read PCI config space.
 *
 * Reads `len` bytes at `offset` through the PCIOCREAD ioctl on
 * /dev/pci. Returns the number of bytes requested on success, -1 on
 * failure.
 */
int rte_pci_read_config(const struct rte_pci_device *dev,
		void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	/* Copy Linux implementation's behaviour */
	const int return_len = len;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		PCI_LOG(ERR, "%s(): error opening /dev/pci", __func__);
		goto error;
	}

	/*
	 * PCIOCREAD moves at most 4 bytes per call, so split the request
	 * into the widest 4/2/1-byte accesses that fit the remaining
	 * length. NOTE(review): `offset` alignment is not checked here;
	 * presumably callers pass naturally aligned offsets - confirm.
	 */
	while (len > 0) {
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	/* match the Linux path: report the byte count that was requested */
	return return_len;

 error:
	if (fd >= 0)
		close(fd);
	return -1;
}
442 
443 /* Write PCI config space. */
rte_pci_write_config(const struct rte_pci_device * dev,const void * buf,size_t len,off_t offset)444 int rte_pci_write_config(const struct rte_pci_device *dev,
445 		const void *buf, size_t len, off_t offset)
446 {
447 	int fd = -1;
448 
449 	struct pci_io pi = {
450 		.pi_sel = {
451 			.pc_domain = dev->addr.domain,
452 			.pc_bus = dev->addr.bus,
453 			.pc_dev = dev->addr.devid,
454 			.pc_func = dev->addr.function,
455 		},
456 		.pi_reg = offset,
457 		.pi_data = *(const uint32_t *)buf,
458 		.pi_width = len,
459 	};
460 
461 	if (len == 3 || len > sizeof(pi.pi_data)) {
462 		PCI_LOG(ERR, "%s(): invalid pci read length", __func__);
463 		goto error;
464 	}
465 
466 	memcpy(&pi.pi_data, buf, len);
467 
468 	fd = open("/dev/pci", O_RDWR);
469 	if (fd < 0) {
470 		PCI_LOG(ERR, "%s(): error opening /dev/pci", __func__);
471 		goto error;
472 	}
473 
474 	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
475 		goto error;
476 
477 	close(fd);
478 	return 0;
479 
480  error:
481 	if (fd >= 0)
482 		close(fd);
483 	return -1;
484 }
485 
486 /* Read PCI MMIO space. */
rte_pci_mmio_read(const struct rte_pci_device * dev,int bar,void * buf,size_t len,off_t offset)487 int rte_pci_mmio_read(const struct rte_pci_device *dev, int bar,
488 		      void *buf, size_t len, off_t offset)
489 {
490 	if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
491 			(uint64_t)offset + len > dev->mem_resource[bar].len)
492 		return -1;
493 	memcpy(buf, (uint8_t *)dev->mem_resource[bar].addr + offset, len);
494 	return len;
495 }
496 
497 /* Write PCI MMIO space. */
rte_pci_mmio_write(const struct rte_pci_device * dev,int bar,const void * buf,size_t len,off_t offset)498 int rte_pci_mmio_write(const struct rte_pci_device *dev, int bar,
499 		       const void *buf, size_t len, off_t offset)
500 {
501 	if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
502 			(uint64_t)offset + len > dev->mem_resource[bar].len)
503 		return -1;
504 	memcpy((uint8_t *)dev->mem_resource[bar].addr + offset, buf, len);
505 	return len;
506 }
507 
/*
 * Prepare port I/O access to BAR `bar` of a nic_uio device.
 *
 * x86 only: pci_scan_one() stored an I/O BAR's port base in
 * mem_resource[bar].addr, and a usable x86 port number must fit in
 * 16 bits; in/out access also requires iopl privilege. On other
 * architectures (or other drivers) this always fails.
 *
 * Returns 0 on success (p->base and p->dev set), -1 otherwise.
 */
int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		if (rte_eal_iopl_init() != 0) {
			PCI_LOG(ERR, "%s(): insufficient ioport permissions for PCI device %s",
				__func__, dev->name);
			return -1;
		}
		/* values above 0xffff cannot be x86 I/O port bases */
		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	/* only publish the device pointer on success */
	if (!ret)
		p->dev = dev;

	return ret;
}
539 
/*
 * Read `len` bytes from the device's I/O port range into `data`,
 * starting `offset` bytes past the mapped base.
 *
 * x86 only: uses in{l,w,b} instructions, always picking the widest
 * access that fits the remaining length. A no-op elsewhere.
 */
static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *d;
	int size;
	/* x86 port numbers are 16-bit, hence the unsigned short */
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}
568 
569 void
rte_pci_ioport_read(struct rte_pci_ioport * p,void * data,size_t len,off_t offset)570 rte_pci_ioport_read(struct rte_pci_ioport *p,
571 		void *data, size_t len, off_t offset)
572 {
573 	switch (p->dev->kdrv) {
574 	case RTE_PCI_KDRV_NIC_UIO:
575 		pci_uio_ioport_read(p, data, len, offset);
576 		break;
577 	default:
578 		break;
579 	}
580 }
581 
/*
 * Write `len` bytes from `data` to the device's I/O port range,
 * starting `offset` bytes past the mapped base.
 *
 * x86 only: uses out{l,w,b} instructions, always picking the widest
 * access that fits the remaining length. A no-op elsewhere.
 */
static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	/* x86 port numbers are 16-bit, hence the unsigned short */
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}
610 
611 void
rte_pci_ioport_write(struct rte_pci_ioport * p,const void * data,size_t len,off_t offset)612 rte_pci_ioport_write(struct rte_pci_ioport *p,
613 		const void *data, size_t len, off_t offset)
614 {
615 	switch (p->dev->kdrv) {
616 	case RTE_PCI_KDRV_NIC_UIO:
617 		pci_uio_ioport_write(p, data, len, offset);
618 		break;
619 	default:
620 		break;
621 	}
622 }
623 
/*
 * Tear down an ioport mapping. Nothing was actually mapped for x86
 * port I/O, so success requires only that the device uses nic_uio on
 * x86; every other combination reports failure.
 */
int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
#if defined(RTE_ARCH_X86)
	if (p->dev->kdrv == RTE_PCI_KDRV_NIC_UIO)
		return 0;
#endif
	return -1;
}
642