Lines Matching refs:dev
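(Cross-reference hits for the identifier dev, apparently from SPDK's lib/vmd/vmd.c; the number opening each hit is its line number in that file, and file lines that don't mention dev are simply not shown.)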

187  *  dev - vmd_pci_device to allocate a base address for.
194 vmd_allocate_base_addr(struct vmd_adapter *vmd, struct vmd_pci_device *dev, uint32_t size)
209 if (dev) {
210 hp_bus = dev->parent;
234 vmd_is_end_device(struct vmd_pci_device *dev)
236 return (dev && dev->header) &&
237 ((dev->header->common.header_type & ~PCI_MULTI_FUNCTION) == PCI_HEADER_TYPE_NORMAL);
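The mask-and-compare above is the standard header-type test: bit 7 of header_type only flags a multi-function device, so it must be cleared before comparing the layout type. A minimal sketch, with the constant values assumed from the PCI spec rather than read out of this file:

    #include <stdbool.h>
    #include <stdint.h>

    #define PCI_MULTI_FUNCTION     0x80  /* bit 7: multi-function flag */
    #define PCI_HEADER_TYPE_NORMAL 0x00  /* layout 0: endpoint (type 0) header */

    static bool
    header_is_endpoint(uint8_t header_type)
    {
        /* drop the multi-function bit, keep only the layout bits */
        return (header_type & ~PCI_MULTI_FUNCTION) == PCI_HEADER_TYPE_NORMAL;
    }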
241 vmd_update_base_limit_register(struct vmd_pci_device *dev, uint16_t base, uint16_t limit)
250 if (dev->header->common.header_type == PCI_HEADER_TYPE_BRIDGE) {
251 bus = dev->bus_object;
253 bus = dev->parent;
260 if (dev->bus->vmd->scan_completed) {
283 vmd_get_base_addr(struct vmd_pci_device *dev, uint32_t index, uint32_t size)
285 struct vmd_pci_bus *bus = dev->parent;
287 if (dev->header_type == PCI_HEADER_TYPE_BRIDGE) {
288 return dev->header->zero.BAR[index] & ~0xf;
299 vmd_assign_base_addrs(struct vmd_pci_device *dev)
309 if (dev && dev->bus) {
310 vmd = dev->bus->vmd;
319 last = dev->header_type ? 2 : 6;
321 bar_value = dev->header->zero.BAR[i];
322 dev->header->zero.BAR[i] = ~(0U);
323 dev->bar[i].size = dev->header->zero.BAR[i];
324 dev->header->zero.BAR[i] = bar_value;
326 if (dev->bar[i].size == ~(0U) || dev->bar[i].size == 0 ||
327 dev->header->zero.BAR[i] & 1) {
328 dev->bar[i].size = 0;
331 mem_attr = dev->bar[i].size & PCI_BASE_ADDR_MASK;
332 dev->bar[i].size = TWOS_COMPLEMENT(dev->bar[i].size & PCI_BASE_ADDR_MASK);
335 dev->bar[i].start = vmd_get_base_addr(dev, i, dev->bar[i].size);
337 dev->bar[i].start = vmd_allocate_base_addr(vmd, dev, dev->bar[i].size);
340 dev->header->zero.BAR[i] = (uint32_t)dev->bar[i].start;
342 if (!dev->bar[i].start) {
349 dev->bar[i].vaddr = ((uint64_t)vmd->mem_vaddr + (dev->bar[i].start - vmd->membar));
350 mem_limit = BRIDGE_BASEREG(dev->header->zero.BAR[i]) +
351 BRIDGE_BASEREG(dev->bar[i].size - 1);
353 mem_base = BRIDGE_BASEREG(dev->header->zero.BAR[i]);
361 dev->header->zero.BAR[i] = (uint32_t)(dev->bar[i].start >> PCI_DWORD_SHIFT);
367 dev->header->zero.command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
372 { uint16_t cmd = dev->header->zero.command; (void)cmd; } /* read back to flush the posted write */
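File lines 321-332 are the classic write-ones BAR sizing probe (type 1 headers expose 2 BARs, type 0 expose 6, hence line 319), and lines 335-367 program the chosen base back, write the high dword of a 64-bit BAR at line 361, and finally enable memory decode and bus mastering. A self-contained sketch of the probe for a 32-bit memory BAR; bar_size_from_probe is illustrative, not a function in this file:

    #include <stdint.h>

    #define PCI_BASE_ADDR_MASK 0xfffffff0u  /* address bits of a 32-bit memory BAR */

    static uint32_t
    bar_size_from_probe(volatile uint32_t *bar)
    {
        uint32_t saved = *bar;  /* preserve the programmed base */
        uint32_t probe;

        *bar = ~0u;             /* write all ones... */
        probe = *bar;           /* ...and read back the size mask */
        *bar = saved;           /* restore the original value */

        /* unimplemented BAR, or an I/O BAR (bit 0 set): nothing to map */
        if (probe == 0 || probe == ~0u || (probe & 1)) {
            return 0;
        }

        /* the size is the two's complement of the writable address bits */
        return ~(probe & PCI_BASE_ADDR_MASK) + 1;
    }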
374 if (dev->msix_cap && ret_val) {
375 table_offset = ((volatile struct pci_msix_cap *)dev->msix_cap)->msix_table_offset;
376 if (dev->bar[table_offset & 0x3].vaddr) {
377 dev->msix_table = (volatile struct pci_msix_table_entry *)
378 (dev->bar[table_offset & 0x3].vaddr + (table_offset & 0xfff8));
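The table_offset register read at file line 375 packs two fields: which BAR holds the MSI-X table, and the byte offset within it. The excerpt masks with 0x3 and 0xfff8; the PCI spec defines the BAR indicator as the low three bits and the offset as the register with those bits cleared, so a spec-shaped decode would look like this (a sketch; msix_table_location is not a function in this file):

    #include <stdint.h>

    static void
    msix_table_location(uint32_t table_offset_reg, uint32_t *bir, uint32_t *offset)
    {
        *bir    = table_offset_reg & 0x7;   /* BAR Indicator Register, 3 bits */
        *offset = table_offset_reg & ~0x7u; /* QWORD-aligned byte offset */
    }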
382 if (ret_val && vmd_is_end_device(dev)) {
383 vmd_update_base_limit_register(dev, mem_base, mem_limit);
390 vmd_get_device_capabilities(struct vmd_pci_device *dev)
397 config_space = (volatile uint8_t *)dev->header;
398 if ((dev->header->common.status & PCI_CAPABILITIES_LIST) == 0) {
402 capabilities_offset = dev->header->zero.cap_pointer;
403 if (dev->header->common.header_type & PCI_HEADER_TYPE_BRIDGE) {
404 capabilities_offset = dev->header->one.cap_pointer;
412 dev->pcie_cap = (volatile struct pci_express_cap *)(capabilities_hdr);
416 dev->msi_cap = (volatile struct pci_msi_cap *)capabilities_hdr;
420 dev->msix_cap = (volatile struct pci_msix_capability *)capabilities_hdr;
421 dev->msix_table_size = dev->msix_cap->message_control.bit.table_size + 1;
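vmd_get_device_capabilities walks the standard capability list: line 398 bails out unless the status register advertises one, lines 402-404 pick the cap_pointer for the header layout in use, and the loop then follows each capability's next pointer until it reads zero. A minimal sketch of that walk, assuming the conventional two-byte capability header:

    #include <stddef.h>
    #include <stdint.h>

    struct cap_header {
        uint8_t capability_id;
        uint8_t next;   /* config offset of the next capability; 0 ends the list */
    };

    static volatile struct cap_header *
    find_capability(volatile uint8_t *config_space, uint8_t cap_ptr, uint8_t id)
    {
        while (cap_ptr != 0) {
            volatile struct cap_header *hdr =
                (volatile struct cap_header *)(config_space + cap_ptr);

            if (hdr->capability_id == id) {
                return hdr;
            }
            cap_ptr = hdr->next;
        }
        return NULL;    /* capability not present */
    }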
432 vmd_get_enhanced_capabilities(struct vmd_pci_device *dev, uint16_t capability_id)
438 data = (uint8_t *)dev->header;
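vmd_get_enhanced_capabilities is the PCIe extended-capability version of the same walk: it starts at config offset 0x100, where each 32-bit header packs a 16-bit id, a 4-bit version, and a 12-bit next offset. A sketch under those spec assumptions (find_ext_capability is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    static volatile uint8_t *
    find_ext_capability(volatile uint8_t *config_space, uint16_t id)
    {
        uint16_t offset = 0x100;    /* extended capabilities start here */

        while (offset != 0) {
            uint32_t hdr = *(volatile uint32_t *)(config_space + offset);

            if ((hdr & 0xffff) == id) {
                return config_space + offset;
            }
            offset = (hdr >> 20) & 0xfff;       /* next pointer; 0 terminates */
            if (offset != 0 && offset < 0x100) {
                return NULL;                    /* malformed list: give up */
            }
        }
        return NULL;
    }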
454 vmd_read_config_space(struct vmd_pci_device *dev)
460 dev->header->common.command |= (BUS_MASTER_ENABLE | MEMORY_SPACE_ENABLE);
461 { uint16_t cmd = dev->header->common.command; (void)cmd; } /* read back to flush the posted write */
463 vmd_get_device_capabilities(dev);
464 dev->sn_cap = (struct serial_number_capability *)vmd_get_enhanced_capabilities(dev,
469 vmd_update_scan_info(struct vmd_pci_device *dev)
471 struct vmd_adapter *vmd_adapter = dev->bus->vmd;
477 if (dev->header_type == PCI_HEADER_TYPE_NORMAL) {
481 if (vmd_device_is_root_port(dev->header)) {
486 dev->header->one.prefetch_base_upper,
487 dev->header->one.prefetch_limit_upper);
488 if (vmd_device_is_enumerated(dev->header)) {
532 vmd_init_hotplug(struct vmd_pci_device *dev, struct vmd_pci_bus *bus)
535 struct vmd_hot_plug *hp = &dev->hp;
538 dev->hotplug_capable = true;
590 struct vmd_pci_device *dev = NULL;
595 /* Make sure we're not creating two devices on the same dev/fn */
596 TAILQ_FOREACH(dev, &bus->dev_list, tailq) {
597 if (dev->devfn == devfn) {
612 dev = calloc(1, sizeof(*dev));
613 if (!dev) {
617 dev->header = header;
618 dev->vid = dev->header->common.vendor_id;
619 dev->did = dev->header->common.device_id;
620 dev->bus = bus;
621 dev->parent = bus;
622 dev->devfn = devfn;
623 header_type = dev->header->common.header_type;
624 rev_class = dev->header->common.rev_class;
625 dev->class = rev_class >> 8;
626 dev->header_type = header_type & 0x7;
629 vmd_update_scan_info(dev);
630 if (!dev->bus->vmd->scan_completed) {
631 vmd_reset_base_limit_registers(dev->header);
635 vmd_read_config_space(dev);
637 return dev;
662 bridge->pci.addr.dev = bridge->devfn;
683 vmd_get_hotplug_bus_numbers(struct vmd_pci_device *dev)
687 if (dev && dev->bus && dev->bus->vmd &&
688 ((dev->bus->vmd->next_bus_number + RESERVED_HOTPLUG_BUSES) < dev->bus->vmd->max_pci_bus)) {
690 dev->bus->vmd->next_bus_number += RESERVED_HOTPLUG_BUSES;
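The guard at file lines 687-690 only hands out a hotplug bus range while RESERVED_HOTPLUG_BUSES more numbers still fit below max_pci_bus. A condensed sketch of that reservation; the struct, the constant's value, and the failure convention here are assumptions, not taken from this file:

    #include <stdint.h>

    #define RESERVED_HOTPLUG_BUSES 1    /* placeholder value */

    struct vmd_adapter_sketch {
        uint32_t next_bus_number;
        uint32_t max_pci_bus;
    };

    static int
    reserve_hotplug_buses(struct vmd_adapter_sketch *vmd)
    {
        if (vmd->next_bus_number + RESERVED_HOTPLUG_BUSES >= vmd->max_pci_bus) {
            return -1;  /* pool exhausted; nothing reserved */
        }
        vmd->next_bus_number += RESERVED_HOTPLUG_BUSES;
        return 0;
    }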
697 vmd_enable_msix(struct vmd_pci_device *dev)
701 control = dev->msix_cap->message_control.as_uint16_t | (1 << 14);
702 dev->msix_cap->message_control.as_uint16_t = control;
703 control = dev->msix_cap->message_control.as_uint16_t;
704 dev->msix_cap->message_control.as_uint16_t = (control | (1 << 15));
705 control = dev->msix_cap->message_control.as_uint16_t;
707 dev->msix_cap->message_control.as_uint16_t = control;
708 control = dev->msix_cap->message_control.as_uint16_t;
712 vmd_disable_msix(struct vmd_pci_device *dev)
716 control = dev->msix_cap->message_control.as_uint16_t | (1 << 14);
717 dev->msix_cap->message_control.as_uint16_t = control;
718 control = dev->msix_cap->message_control.as_uint16_t & ~(1 << 15);
719 dev->msix_cap->message_control.as_uint16_t = control;
720 control = dev->msix_cap->message_control.as_uint16_t;
728 vmd_setup_msix(struct vmd_pci_device *dev, volatile struct pci_msix_table_entry *vmdEntry)
732 if (!dev || !vmdEntry || !dev->msix_cap) {
736 vmd_disable_msix(dev);
737 if (dev->msix_table == NULL || dev->msix_table_size > MAX_MSIX_TABLE_SIZE) {
741 for (entry = 0; entry < dev->msix_table_size; ++entry) {
742 dev->msix_table[entry].vector_control = 1;
744 vmd_enable_msix(dev);
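vmd_enable_msix and vmd_disable_msix toggle bits 14 and 15 of the MSI-X message control word, reading the register back after each write, and vmd_setup_msix first sets bit 0 (the per-vector mask) in every table entry's vector_control at file line 742 so nothing can fire while the table is programmed. A sketch of the control-word sequence with named bits; the MSIX_* macros and the helper are illustrative, and since this listing only shows lines containing dev, the file's full sequence may include steps absent here:

    #include <stdint.h>

    #define MSIX_FUNCTION_MASK (1u << 14)   /* bit 14: mask all vectors */
    #define MSIX_ENABLE        (1u << 15)   /* bit 15: MSI-X enable */

    static void
    msix_set_enabled(volatile uint16_t *message_control, int enable)
    {
        uint16_t control = *message_control | MSIX_FUNCTION_MASK;

        *message_control = control;             /* mask while reconfiguring */
        if (enable) {
            *message_control = control | MSIX_ENABLE;
            *message_control &= ~MSIX_FUNCTION_MASK;    /* unmask once enabled */
        } else {
            *message_control = control & ~MSIX_ENABLE;
        }
        (void)*message_control;                 /* read back to flush the posted write */
    }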
751 volatile struct vmd_pci_device *dev = bridge;
754 if (!dev) {
758 while (dev->parent_bridge != NULL) {
759 dev = dev->parent_bridge;
760 if (dev->header->one.subordinate < subordinate_bus) {
761 dev->header->one.subordinate = subordinate_bus;
762 subordinate_bus = dev->header->one.subordinate;
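The walk above exists because configuration and memory transactions only reach a bus if every bridge on the path claims it: whenever a deeper bus number is handed out, each ancestor's subordinate register (the highest bus number it routes downstream) has to be raised to cover it, all the way up to the root.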
768 vmd_is_supported_device(struct vmd_pci_device *dev)
770 return dev->class == PCI_CLASS_STORAGE_EXPRESS;
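PCI_CLASS_STORAGE_EXPRESS is the 24-bit class code 0x010802 (base class 01h mass storage, subclass 08h non-volatile memory, programming interface 02h NVMe), which is exactly what dev->class holds after the rev_class >> 8 decode at file line 625.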
777 struct vmd_pci_device *dev = SPDK_CONTAINEROF(pci_dev, struct vmd_pci_device, pci);
779 *size = dev->bar[bar].size;
780 *phys_addr = dev->bar[bar].start;
781 *mapped_addr = (void *)dev->bar[bar].vaddr;
796 struct vmd_pci_device *dev = SPDK_CONTAINEROF(_dev, struct vmd_pci_device, pci);
797 volatile uint8_t *src = (volatile uint8_t *)dev->header;
816 struct vmd_pci_device *dev = SPDK_CONTAINEROF(_dev, struct vmd_pci_device, pci);
817 volatile uint8_t *dst = (volatile uint8_t *)dev->header;
833 vmd_dev_free(struct vmd_pci_device *dev)
835 struct vmd_pci_device *bus_device = dev->bus->self;
836 size_t i, num_bars = dev->header_type ? 2 : 6;
841 if (dev->bar[i].start != 0) {
842 vmd_hotplug_free_addr(&bus_device->hp, dev->bar[i].start);
847 free(dev);
851 vmd_dev_detach(struct spdk_pci_device *dev)
853 struct vmd_pci_device *vmd_device = (struct vmd_pci_device *)dev;
856 spdk_pci_unhook_device(dev);
863 vmd_dev_init(struct vmd_pci_device *dev)
865 dev->pci.addr.domain = dev->bus->vmd->domain;
866 dev->pci.addr.bus = dev->bus->bus_number;
867 dev->pci.addr.dev = dev->devfn;
868 dev->pci.addr.func = 0;
869 dev->pci.numa_id = spdk_pci_device_get_numa_id(dev->bus->vmd->pci);
870 dev->pci.id.vendor_id = dev->header->common.vendor_id;
871 dev->pci.id.device_id = dev->header->common.device_id;
872 dev->pci.type = "vmd";
873 dev->pci.map_bar = vmd_dev_map_bar;
874 dev->pci.unmap_bar = vmd_dev_unmap_bar;
875 dev->pci.cfg_read = vmd_dev_cfg_read;
876 dev->pci.cfg_write = vmd_dev_cfg_write;
877 dev->hotplug_capable = false;
878 if (dev->pcie_cap != NULL) {
879 dev->cached_slot_control = dev->pcie_cap->slot_control;
884 vmd_init_end_device(struct vmd_pci_device *dev)
886 struct vmd_pci_bus *bus = dev->bus;
892 if (!vmd_assign_base_addrs(dev)) {
893 SPDK_ERRLOG("Failed to allocate BARs for device: %p\n", dev);
897 vmd_setup_msix(dev, &bus->vmd->msix_table[0]);
898 vmd_dev_init(dev);
900 if (vmd_is_supported_device(dev)) {
901 spdk_pci_addr_fmt(bdf, sizeof(bdf), &dev->pci.addr);
903 dev->pci.parent = dev->bus->vmd->pci;
907 rc = spdk_pci_hook_device(driver, &dev->pci);
914 vmd->target[vmd->nvme_count] = dev;
919 TAILQ_INSERT_TAIL(&bus->dev_list, dev, tailq);
1035 vmd_print_pci_info(struct vmd_pci_device *dev)
1037 if (!dev) {
1041 if (dev->pcie_cap != NULL) {
1043 dev->header->common.vendor_id, dev->header->common.device_id,
1044 dev->pcie_cap->express_cap_register.bit_field.device_type,
1045 device_type[dev->pcie_cap->express_cap_register.bit_field.device_type]);
1048 dev->header->common.vendor_id, dev->header->common.device_id);
1051 SPDK_INFOLOG(vmd, "\tDOMAIN:BDF: %04x:%02x:%02x:%x\n", dev->pci.addr.domain,
1052 dev->pci.addr.bus, dev->pci.addr.dev, dev->pci.addr.func);
1054 if (!(dev->header_type & PCI_HEADER_TYPE_BRIDGE) && dev->bus) {
1056 dev->header->zero.BAR[0], (void *)dev->bar[0].vaddr);
1059 if ((dev->header_type & PCI_HEADER_TYPE_BRIDGE)) {
1061 dev->header->one.primary, dev->header->one.secondary, dev->header->one.subordinate);
1062 if (dev->pcie_cap && dev->pcie_cap->express_cap_register.bit_field.slot_implemented) {
1064 if (dev->pcie_cap->slot_cap.bit_field.hotplug_capable) {
1070 if (dev->sn_cap != NULL) {
1071 uint8_t *snLow = (uint8_t *)&dev->sn_cap->sn_low;
1072 uint8_t *snHi = (uint8_t *)&dev->sn_cap->sn_hi;
1080 vmd_cache_scan_info(struct vmd_pci_device *dev)
1084 if (dev->header_type == PCI_HEADER_TYPE_NORMAL) {
1088 SPDK_INFOLOG(vmd, "vendor/device id:%x:%x\n", dev->header->common.vendor_id,
1089 dev->header->common.device_id);
1091 if (vmd_device_is_root_port(dev->header)) {
1092 dev->header->one.prefetch_base_upper = VMD_UPPER_BASE_SIGNATURE;
1093 reg = dev->header->one.prefetch_base_upper; /* read back to flush the posted write */
1094 dev->header->one.prefetch_limit_upper = VMD_UPPER_LIMIT_SIGNATURE;
1095 reg = dev->header->one.prefetch_limit_upper; /* read back to flush the posted write */
1098 dev->header->one.prefetch_base_upper,
1099 dev->header->one.prefetch_limit_upper);
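The writes at file lines 1092 and 1094 are a persistence trick: a root port's prefetch_base_upper and prefetch_limit_upper registers are left holding signature values so a later scan can recognize an already-enumerated hierarchy (presumably what the vmd_device_is_enumerated check at file line 488 tests). A sketch of that check; the register struct and the signature values below are placeholders:

    #include <stdbool.h>
    #include <stdint.h>

    #define VMD_UPPER_BASE_SIGNATURE  0xDEADBEEFu   /* placeholder value */
    #define VMD_UPPER_LIMIT_SIGNATURE 0xFEEDC0DEu   /* placeholder value */

    struct type1_prefetch_regs {
        uint32_t prefetch_base_upper;
        uint32_t prefetch_limit_upper;
    };

    static bool
    root_port_is_enumerated(const volatile struct type1_prefetch_regs *regs)
    {
        return regs->prefetch_base_upper == VMD_UPPER_BASE_SIGNATURE &&
               regs->prefetch_limit_upper == VMD_UPPER_LIMIT_SIGNATURE;
    }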
1134 struct vmd_pci_device *dev;
1149 bus->vmd->pci->addr.dev, bus->vmd->pci->addr.func);
1157 TAILQ_FOREACH(dev, &bus_entry->dev_list, tailq) {
1158 vmd_print_pci_info(dev);
1267 struct vmd_pci_device *dev;
1278 TAILQ_FOREACH(dev, &bus->dev_list, tailq) {
1279 if (spdk_pci_addr_compare(&dev->pci.addr, addr) == 0) {
1280 return dev;
1307 vmd->domain = (pci_dev->addr.bus << 16) | (pci_dev->addr.dev << 8) | pci_dev->addr.func;
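This packs the VMD endpoint's own BDF into a synthetic PCI domain so devices behind different VMD controllers never collide: an endpoint at 0000:5d:05.5, for example, would yield domain 0x5d0505.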
1332 struct vmd_pci_device *dev;
1342 TAILQ_FOREACH(dev, &bus->dev_list, tailq) {
1343 nvme_list[cnt++] = dev->pci;
1344 if (!dev->is_hooked) {
1345 vmd_dev_init(dev);
1346 dev->is_hooked = 1;
1488 struct vmd_pci_device *dev;
1508 dev = vmd_alloc_dev(bus, addr->dev);
1509 if (dev == NULL) {
1514 if (dev->header->common.header_type & PCI_HEADER_TYPE_BRIDGE) {
1515 free(dev);
1519 rc = vmd_init_end_device(dev);
1521 free(dev);
1535 struct vmd_pci_device *dev = SPDK_CONTAINEROF(pci_dev, struct vmd_pci_device, pci);
1540 vmd_remove_device(dev);