Lines Matching defs:pdev
149 linux_pdev_dma_uninit(struct pci_dev *pdev)
153 priv = pdev->dev.dma_priv;
159 pdev->dev.dma_priv = NULL;
165 linux_pdev_dma_init(struct pci_dev *pdev)
175 pdev->dev.dma_priv = priv;
178 error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
182 error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
189 linux_pdev_dma_uninit(pdev);
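
The matches above are the LinuxKPI DMA setup: linux_pdev_dma_init() creates a tag for a full 64-bit mask, pairs it with a 32-bit coherent tag, and tears both down via linux_pdev_dma_uninit() on failure. A minimal sketch of the same split from the driver side, using the standard Linux DMA API (example_dma_setup is a hypothetical name):

#include <linux/dma-mapping.h>

/* Hypothetical helper mirroring linux_pdev_dma_init(): 64-bit mask for
 * streaming DMA, coherent allocations constrained below 4 GB. */
static int
example_dma_setup(struct device *dev)
{
	int error;

	error = dma_set_mask(dev, DMA_BIT_MASK(64));
	if (error != 0)
		return (error);
	return (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)));
}
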
290 struct pci_dev *pdev;
295 list_for_each_entry(pdev, &pci_devices, links) {
296 if (pdev->vendor == vendor && pdev->device == device)
301 return (pdev);
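
lkpi_pci_find() walks the global pci_devices list for a vendor/device match and returns the entry. The Linux-side equivalent of this lookup is pci_get_device(); a short usage sketch (the vendor ID is an arbitrary example):

#include <linux/pci.h>

static void
example_find(void)
{
	struct pci_dev *p = NULL;

	/* Iterate all devices from one vendor; pci_get_device() takes a
	 * reference on the device it returns and drops the one held on
	 * the 'from' argument. */
	while ((p = pci_get_device(0x8086, PCI_ANY_ID, p)) != NULL)
		dev_info(&p->dev, "matching device found\n");
}
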
313 lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
316 pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
317 pdev->vendor = pci_get_vendor(dev);
318 pdev->device = pci_get_device(dev);
319 pdev->subsystem_vendor = pci_get_subvendor(dev);
320 pdev->subsystem_device = pci_get_subdevice(dev);
321 pdev->class = pci_get_class(dev);
322 pdev->revision = pci_get_revid(dev);
323	pdev->path_name = kasprintf(GFP_KERNEL, "%04x:%02x:%02x.%x",
326 pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
332 pdev->bus->self = pdev;
333 pdev->bus->number = pci_get_bus(dev);
334 pdev->bus->domain = pci_get_domain(dev);
335 pdev->dev.bsddev = dev;
336 pdev->dev.parent = &linux_root_device;
337 pdev->dev.release = lkpi_pci_dev_release;
338 INIT_LIST_HEAD(&pdev->dev.irqents);
341 pdev->msi_desc = malloc(pci_msi_count(dev) *
342 sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);
344 kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
345 kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
346 kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
347 kobject_name(&pdev->dev.kobj));
348 spin_lock_init(&pdev->dev.devres_lock);
349 INIT_LIST_HEAD(&pdev->dev.devres_head);
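
lkpifill_pci_dev() copies the FreeBSD-side identity (vendor, device, subsystem IDs, class, revision) into the struct pci_dev, allocates the synthetic pci_bus, and registers the kobject under linux_root_device. The devfn value it builds uses the standard Linux packing macros; a hypothetical illustration:

#include <linux/pci.h>

static void
example_devfn(void)
{
	unsigned int devfn, slot, func;

	devfn = PCI_DEVFN(0x1f, 3);	/* (slot << 3) | function */
	slot = PCI_SLOT(devfn);		/* recovers 0x1f */
	func = PCI_FUNC(devfn);		/* recovers 3 */
	(void)slot;
	(void)func;
}
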
355 struct pci_dev *pdev;
358 pdev = to_pci_dev(dev);
359 if (pdev->root != NULL)
360 pci_dev_put(pdev->root);
361 if (pdev->bus->self != pdev)
362 pci_dev_put(pdev->bus->self);
363 free(pdev->bus, M_DEVBUF);
364 if (pdev->msi_desc != NULL) {
365 for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
366 free(pdev->msi_desc[i], M_DEVBUF);
367 free(pdev->msi_desc, M_DEVBUF);
369 kfree(pdev->path_name);
370 free(pdev, M_DEVBUF);
376 struct pci_dev *pdev;
378	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
379 lkpifill_pci_dev(dev, pdev);
380 pdev->dev.release = lkpinew_pci_dev_release;
382 return (pdev);
390 struct pci_dev *pdev;
399 pdev = lkpinew_pci_dev(dev);
400 return (pdev);
408 struct pci_dev *pdev;
414 pdev = lkpinew_pci_dev(dev);
415 return (pdev);
442 struct pci_dev *pdev;
445 pdev = device_get_softc(dev);
448 MPASS(pdev != NULL);
450 return (linux_pci_attach_device(dev, pdrv, id, pdev));
454 linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
463 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
464 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
473 linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
479 dinfo = device_get_ivars(pdev->dev.bsddev);
484 rle = linux_pci_reserve_bar(pdev, rl, type, rid);
490 const struct pci_device_id *id, struct pci_dev *pdev)
510 lkpifill_pci_dev(dev, pdev);
515 pdev->devfn = rid;
516 pdev->pdrv = pdrv;
517 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
519 pdev->dev.irq = rle->start;
521 pdev->dev.irq = LINUX_IRQ_INVALID;
522 pdev->irq = pdev->dev.irq;
523 error = linux_pdev_dma_init(pdev);
527 TAILQ_INIT(&pdev->mmio);
528 spin_lock_init(&pdev->pcie_cap_lock);
531 list_add(&pdev->links, &pci_devices);
535 error = pdrv->probe(pdev, id);
542 free(pdev->bus, M_DEVBUF);
543 spin_lock_destroy(&pdev->pcie_cap_lock);
544 linux_pdev_dma_uninit(pdev);
547 list_del(&pdev->links);
549 put_device(&pdev->dev);
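
linux_pci_attach_device() fills the pci_dev, resolves the legacy IRQ, initializes DMA and the mmio list, publishes the device on pci_devices, and only then calls pdrv->probe(); the trailing matches are the reverse-order unwind for a failed probe. A hypothetical driver probe of the kind reached through that call:

#include <linux/pci.h>

/* exdrv_probe is a hypothetical probe invoked via pdrv->probe(). */
static int
exdrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int error;

	error = pci_enable_device(pdev);
	if (error != 0)
		return (error);
	pci_set_master(pdev);		/* enable bus mastering for DMA */
	return (0);
}
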
556 struct pci_dev *pdev;
558 pdev = device_get_softc(dev);
560 MPASS(pdev != NULL);
564 return (linux_pci_detach_device(pdev));
568 linux_pci_detach_device(struct pci_dev *pdev)
573 if (pdev->pdrv != NULL)
574 pdev->pdrv->remove(pdev);
576 if (pdev->root != NULL)
577 pci_dev_put(pdev->root);
578 free(pdev->bus, M_DEVBUF);
579 linux_pdev_dma_uninit(pdev);
582 list_del(&pdev->links);
584 spin_lock_destroy(&pdev->pcie_cap_lock);
585 put_device(&pdev->dev);
600 lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
604 dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
609 lkpi_devres_add(&pdev->dev, dr);
616 lkpi_pci_devres_find(struct pci_dev *pdev)
618 if (!pdev->managed)
621 return (lkpi_pci_devres_get_alloc(pdev));
628 struct pci_dev *pdev;
631 pdev = to_pci_dev(dev);
634 if (pdev->msix_enabled)
635 lkpi_pci_disable_msix(pdev);
636 if (pdev->msi_enabled)
637 lkpi_pci_disable_msi(pdev);
648 pci_release_region(pdev, bar);
653 linuxkpi_pcim_enable_device(struct pci_dev *pdev)
658	/* We cannot rely on the pdev->managed check here; the flag is only set below. */
659 dr = lkpi_pci_devres_get_alloc(pdev);
667 error = pci_enable_device(pdev);
672 pdev->managed = true;
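
linuxkpi_pcim_enable_device() has to call lkpi_pci_devres_get_alloc() directly because pdev->managed is only set at the end, so the lkpi_pci_devres_find() shortcut would still return NULL at this point. From the driver side the managed variant removes the need for an explicit disable; a sketch under that assumption (exdrv_probe_managed is hypothetical):

static int
exdrv_probe_managed(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int error;

	/* Managed enable: the devres release path seen above disables
	 * MSI/MSI-X and releases claimed regions automatically on detach. */
	error = pcim_enable_device(pdev);
	if (error != 0)
		return (error);
	return (0);
}
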
678 lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
682 dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
688 lkpi_devres_add(&pdev->dev, dr);
692 device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);
698 linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
702 dr = lkpi_pcim_iomap_devres_find(pdev);
710 if (pdev->want_iomap_res)
718 _lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
723 type = pci_resource_type(pdev, bar);
725 device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
734 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
743 mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
746 device_printf(pdev->dev.bsddev, "%s: failed to alloc "
752 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
758 linuxkpi_pci_iomap_range(struct pci_dev *pdev, int mmio_bar,
763 res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
767 if (pdev->want_iomap_res)
774 linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
776 return (linuxkpi_pci_iomap_range(pdev, mmio_bar, 0, mmio_size));
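
_lkpi_pci_iomap() validates the BAR type, reuses an existing mapping cached on pdev->mmio if the BAR was already mapped, and otherwise allocates the backing resource; linuxkpi_pci_iomap_range() and linuxkpi_pci_iomap() are thin wrappers that add the offset and the want_iomap_res distinction. Typical use of the public API, assuming this fragment runs in a probe routine with pdev in scope (BAR number and register offset are arbitrary examples):

void __iomem *regs;
u32 val;

regs = pci_iomap(pdev, 0, 0);	/* length 0 maps the whole BAR */
if (regs == NULL)
	return (-ENOMEM);
val = readl(regs + 0x10);	/* arbitrary example register */
pci_iounmap(pdev, regs);
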
780 linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
785 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
786 if (pdev->want_iomap_res) {
795 bus_release_resource(pdev->dev.bsddev,
797 TAILQ_REMOVE(&pdev->mmio, mmio, next);
804 linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
811 dr = lkpi_pcim_iomap_devres_find(pdev);
822 device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
827 res = _lkpi_pci_iomap(pdev, bar, 0);
843 pci_iounmap(pdev, res);
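
linuxkpi_pcim_iomap_regions() maps every BAR named in the bit mask into the devres-backed table and unwinds with pci_iounmap() if any BAR fails, so the mappings live until detach. Usage sketch (BAR 0 and the name string are arbitrary):

void __iomem *regs;
int error;

error = pcim_iomap_regions(pdev, BIT(0), "exdrv");
if (error != 0)
	return (error);
regs = pcim_iomap_table(pdev)[0];	/* released automatically */
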
854 struct pci_dev *pdev;
858 pdev = to_pci_dev(dev);
864 pci_iounmap(pdev, dr->mmio_table[bar]);
873 struct pci_dev *pdev;
878 pdev = device_get_softc(dev);
879 pmops = pdev->pdrv->driver.pm;
881 if (pdev->pdrv->suspend != NULL)
882 error = -pdev->pdrv->suspend(pdev, pm);
884 error = -pmops->suspend(&pdev->dev);
886 error = -pmops->suspend_late(&pdev->dev);
888 error = -pmops->suspend_noirq(&pdev->dev);
897 struct pci_dev *pdev;
902 pdev = device_get_softc(dev);
903 pmops = pdev->pdrv->driver.pm;
905 if (pdev->pdrv->resume != NULL)
906 error = -pdev->pdrv->resume(pdev);
909 error = -pmops->resume_early(&pdev->dev);
911 error = -pmops->resume(&pdev->dev);
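
The suspend and resume handlers prefer the legacy pdrv->suspend/resume callbacks and otherwise run the dev_pm_ops stages in order: suspend, suspend_late, suspend_noirq on the way down, then resume_early and resume on the way up. A driver typically supplies those stages through the standard helper macro (all names here are hypothetical):

#include <linux/pm.h>

static int exdrv_suspend(struct device *dev) { return (0); }
static int exdrv_resume(struct device *dev) { return (0); }

static SIMPLE_DEV_PM_OPS(exdrv_pm_ops, exdrv_suspend, exdrv_resume);

static struct pci_driver exdrv_pm_driver = {
	.name = "exdrv",
	.driver.pm = &exdrv_pm_ops,
};
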
919 struct pci_dev *pdev;
922 pdev = device_get_softc(dev);
923 if (pdev->pdrv->shutdown != NULL)
924 pdev->pdrv->shutdown(pdev);
931 struct pci_dev *pdev;
935 pdev = device_get_softc(dev);
936 if (pdev->pdrv->bsd_iov_init != NULL)
937 error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
946 struct pci_dev *pdev;
949 pdev = device_get_softc(dev);
950 if (pdev->pdrv->bsd_iov_uninit != NULL)
951 pdev->pdrv->bsd_iov_uninit(dev);
957 struct pci_dev *pdev;
961 pdev = device_get_softc(dev);
962 if (pdev->pdrv->bsd_iov_add_vf != NULL)
963 error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
1003 lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
1007 type = pci_resource_type(pdev, bar);
1011 return (linux_pci_get_rle(pdev, type, bar, reserve));
1017 struct pci_dev *pdev;
1022 list_for_each_entry(pdev, &pci_devices, links) {
1023 if (irq == pdev->dev.irq ||
1024 (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
1025 found = &pdev->dev;
1034 pci_resource_start(struct pci_dev *pdev, int bar)
1041 if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
1043 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
1044 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
1047 device_printf(pdev->dev.bsddev,
1056 pci_resource_len(struct pci_dev *pdev, int bar)
1060 if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
1066 pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1074 type = pci_resource_type(pdev, bar);
1078 res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
1081 device_printf(pdev->dev.bsddev, "%s: failed to alloc "
1093 dr = lkpi_pci_devres_find(pdev);
1104 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
1110 linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
1116 error = pci_request_region(pdev, i, res_name);
1118 pci_release_regions(pdev);
1126 linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
1132 if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
1139 dr = lkpi_pci_devres_find(pdev);
1141 KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
1142 " region_table res %p != rel->res %p\n", __func__, pdev,
1148 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
1151 TAILQ_REMOVE(&pdev->mmio, mmio, next);
1155 bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
1159 linuxkpi_pci_release_regions(struct pci_dev *pdev)
1164 pci_release_region(pdev, i);
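
pci_request_region() reserves a single BAR, recording it in the devres region_table when the device is managed, or on pdev->mmio otherwise; linuxkpi_pci_request_regions() applies that to every BAR and rolls everything back on the first failure. Driver-side usage (the name string is arbitrary):

int error;

error = pci_request_regions(pdev, "exdrv");	/* claim all BARs */
if (error != 0)
	return (error);
/* ... program the device ... */
pci_release_regions(pdev);
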
1213 linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
1221 avail = pci_msix_count(pdev->dev.bsddev);
1228 if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
1235 pci_release_msi(pdev->dev.bsddev);
1238 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
1239 pdev->dev.irq_start = rle->start;
1240 pdev->dev.irq_end = rle->start + avail;
1242 entries[i].vector = pdev->dev.irq_start + i;
1243 pdev->msix_enabled = true;
1248 _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
1257 nvec = pci_msi_count(pdev->dev.bsddev);
1262 if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
1267 pci_release_msi(pdev->dev.bsddev);
1271 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
1272 pdev->dev.irq_start = rle->start;
1273 pdev->dev.irq_end = rle->start + nvec;
1274 pdev->irq = rle->start;
1275 pdev->msi_enabled = true;
1280 pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
1296 error = pci_enable_msix(pdev, entries, maxv);
1299 if (error == 0 && pdev->msix_enabled)
1300 return (pdev->dev.irq_end - pdev->dev.irq_start);
1303 if (pci_msi_count(pdev->dev.bsddev) < minv)
1305 error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
1306 if (error == 0 && pdev->msi_enabled)
1307 return (pdev->dev.irq_end - pdev->dev.irq_start);
1310 if (pdev->irq)
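
pci_alloc_irq_vectors() tries MSI-X first, then MSI, and finally falls back to the legacy line interrupt, returning the number of vectors actually granted. Typical usage (the vector counts and flag set are illustrative):

int nvec;

nvec = pci_alloc_irq_vectors(pdev, 1, 8,
    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (nvec < 0)
	return (nvec);
/* The IRQ number for vector i comes from pci_irq_vector(pdev, i). */
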
1321 struct pci_dev *pdev;
1331 pdev = to_pci_dev(dev);
1333 if (pdev->msi_desc == NULL)
1336 if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
1339	vec = irq - pdev->dev.irq_start;
1341 if (pdev->msi_desc[vec] != NULL)
1342 return (pdev->msi_desc[vec]);
1353 pdev->msi_desc[vec] = desc;
1359 pci_device_is_present(struct pci_dev *pdev)
1363 dev = pdev->dev.bsddev;
1902 struct pci_dev *pdev;
1905 pdev = device_get_softc(dev);
1907 props->brightness = pdev->dev.bd->props.brightness;
1908 props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
1917 struct pci_dev *pdev;
1920 pdev = device_get_softc(dev);
1923 strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
1930 struct pci_dev *pdev;
1933 pdev = device_get_softc(dev);
1935 pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
1937 pdev->dev.bd->props.power = props->brightness == 0 ?
1939 return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
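
The backlight bridge converts between FreeBSD's backlight(9) percentage scale and Linux's 0..max_brightness range in both directions, then pushes the change through bd->ops->update_status(). The two conversions isolated into hypothetical helpers:

/* percent -> Linux brightness level (update path above) */
static int
pct_to_level(int percent, int max_brightness)
{
	return (max_brightness * percent / 100);
}

/* Linux brightness level -> percent (status path above) */
static int
level_to_pct(int level, int max_brightness)
{
	return (level * 100 / max_brightness);
}
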