1 /* 2 * SPDX-License-Identifier: BSD-3-Clause 3 * Copyright(c) 2023 Napatech A/S 4 */ 5 6 #include <rte_eal.h> 7 #include <rte_dev.h> 8 #include <rte_vfio.h> 9 #include <rte_ethdev.h> 10 #include <rte_bus_pci.h> 11 #include <ethdev_pci.h> 12 13 #include "ntlog.h" 14 #include "ntdrv_4ga.h" 15 #include "ntos_drv.h" 16 #include "ntos_system.h" 17 #include "nthw_fpga_instances.h" 18 #include "ntnic_vfio.h" 19 #include "ntnic_mod_reg.h" 20 #include "nt_util.h" 21 22 #define HW_MAX_PKT_LEN (10000) 23 #define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN) 24 25 #define EXCEPTION_PATH_HID 0 26 27 static const struct rte_pci_id nthw_pci_id_map[] = { 28 { RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) }, 29 { 30 .vendor_id = 0, 31 }, /* sentinel */ 32 }; 33 34 static rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER; 35 36 /* 37 * Store and get adapter info 38 */ 39 40 static struct drv_s *_g_p_drv[NUM_ADAPTER_MAX] = { NULL }; 41 42 static void 43 store_pdrv(struct drv_s *p_drv) 44 { 45 if (p_drv->adapter_no >= NUM_ADAPTER_MAX) { 46 NT_LOG(ERR, NTNIC, 47 "Internal error adapter number %u out of range. Max number of adapters: %u", 48 p_drv->adapter_no, NUM_ADAPTER_MAX); 49 return; 50 } 51 52 if (_g_p_drv[p_drv->adapter_no] != 0) { 53 NT_LOG(WRN, NTNIC, 54 "Overwriting adapter structure for PCI " PCIIDENT_PRINT_STR 55 " with adapter structure for PCI " PCIIDENT_PRINT_STR, 56 PCIIDENT_TO_DOMAIN(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident), 57 PCIIDENT_TO_BUSNR(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident), 58 PCIIDENT_TO_DEVNR(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident), 59 PCIIDENT_TO_FUNCNR(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident), 60 PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident), 61 PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident), 62 PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident), 63 PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident)); 64 } 65 66 rte_spinlock_lock(&hwlock); 67 _g_p_drv[p_drv->adapter_no] = p_drv; 68 rte_spinlock_unlock(&hwlock); 69 } 70 71 static struct drv_s * 72 get_pdrv_from_pci(struct rte_pci_addr addr) 73 { 74 int i; 75 struct drv_s *p_drv = NULL; 76 rte_spinlock_lock(&hwlock); 77 78 for (i = 0; i < NUM_ADAPTER_MAX; i++) { 79 if (_g_p_drv[i]) { 80 if (PCIIDENT_TO_DOMAIN(_g_p_drv[i]->ntdrv.pciident) == addr.domain && 81 PCIIDENT_TO_BUSNR(_g_p_drv[i]->ntdrv.pciident) == addr.bus) { 82 p_drv = _g_p_drv[i]; 83 break; 84 } 85 } 86 } 87 88 rte_spinlock_unlock(&hwlock); 89 return p_drv; 90 } 91 92 static int 93 eth_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete __rte_unused) 94 { 95 const struct port_ops *port_ops = get_port_ops(); 96 97 if (port_ops == NULL) { 98 NT_LOG(ERR, NTNIC, "Link management module uninitialized"); 99 return -1; 100 } 101 102 struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; 103 104 const int n_intf_no = internals->n_intf_no; 105 struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; 106 107 if (eth_dev->data->dev_started) { 108 const bool port_link_status = port_ops->get_link_status(p_adapter_info, n_intf_no); 109 eth_dev->data->dev_link.link_status = 110 port_link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 111 112 nt_link_speed_t port_link_speed = 113 port_ops->get_link_speed(p_adapter_info, n_intf_no); 114 eth_dev->data->dev_link.link_speed = 115 nt_link_speed_to_eth_speed_num(port_link_speed); 116 117 nt_link_duplex_t nt_link_duplex = 118 port_ops->get_link_duplex(p_adapter_info, n_intf_no); 119 eth_dev->data->dev_link.link_duplex = nt_link_duplex_to_eth_duplex(nt_link_duplex); 120 121 } else { 122 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; 123 eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE; 124 eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 125 } 126 127 return 0; 128 } 129 130 static int 131 eth_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info) 132 { 133 const struct port_ops *port_ops = get_port_ops(); 134 135 if (port_ops == NULL) { 136 NT_LOG(ERR, NTNIC, "Link management module uninitialized"); 137 return -1; 138 } 139 140 struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; 141 142 const int n_intf_no = internals->n_intf_no; 143 struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; 144 145 dev_info->driver_name = internals->name; 146 dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT; 147 dev_info->max_rx_pktlen = HW_MAX_PKT_LEN; 148 dev_info->max_mtu = MAX_MTU; 149 150 if (internals->p_drv) { 151 dev_info->max_rx_queues = internals->nb_rx_queues; 152 dev_info->max_tx_queues = internals->nb_tx_queues; 153 154 dev_info->min_rx_bufsize = 64; 155 156 const uint32_t nt_port_speed_capa = 157 port_ops->get_link_speed_capabilities(p_adapter_info, n_intf_no); 158 dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa); 159 } 160 161 return 0; 162 } 163 164 static int 165 eth_mac_addr_add(struct rte_eth_dev *eth_dev, 166 struct rte_ether_addr *mac_addr, 167 uint32_t index, 168 uint32_t vmdq __rte_unused) 169 { 170 struct rte_ether_addr *const eth_addrs = eth_dev->data->mac_addrs; 171 172 assert(index < NUM_MAC_ADDRS_PER_PORT); 173 174 if (index >= NUM_MAC_ADDRS_PER_PORT) { 175 const struct pmd_internals *const internals = 176 (struct pmd_internals *)eth_dev->data->dev_private; 177 NT_LOG_DBGX(DBG, NTNIC, "Port %i: illegal index %u (>= %u)", 178 internals->n_intf_no, index, NUM_MAC_ADDRS_PER_PORT); 179 return -1; 180 } 181 182 eth_addrs[index] = *mac_addr; 183 184 return 0; 185 } 186 187 static int 188 eth_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) 189 { 190 struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs; 191 192 eth_addrs[0U] = *mac_addr; 193 194 return 0; 195 } 196 197 static int 198 eth_set_mc_addr_list(struct rte_eth_dev *eth_dev, 199 struct rte_ether_addr *mc_addr_set, 200 uint32_t nb_mc_addr) 201 { 202 struct pmd_internals *const internals = (struct pmd_internals *)eth_dev->data->dev_private; 203 struct rte_ether_addr *const mc_addrs = internals->mc_addrs; 204 size_t i; 205 206 if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) { 207 NT_LOG_DBGX(DBG, NTNIC, 208 "Port %i: too many multicast addresses %u (>= %u)", 209 internals->n_intf_no, nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT); 210 return -1; 211 } 212 213 for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) 214 if (i < nb_mc_addr) 215 mc_addrs[i] = mc_addr_set[i]; 216 217 else 218 (void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i])); 219 220 return 0; 221 } 222 223 static int 224 eth_dev_configure(struct rte_eth_dev *eth_dev) 225 { 226 NT_LOG_DBGX(DBG, NTNIC, "Called for eth_dev %p", eth_dev); 227 228 /* The device is ALWAYS running promiscuous mode. */ 229 eth_dev->data->promiscuous ^= ~eth_dev->data->promiscuous; 230 return 0; 231 } 232 233 static int 234 eth_dev_start(struct rte_eth_dev *eth_dev) 235 { 236 const struct port_ops *port_ops = get_port_ops(); 237 238 if (port_ops == NULL) { 239 NT_LOG(ERR, NTNIC, "Link management module uninitialized"); 240 return -1; 241 } 242 243 struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; 244 245 const int n_intf_no = internals->n_intf_no; 246 struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; 247 248 NT_LOG_DBGX(DBG, NTNIC, "Port %u", internals->n_intf_no); 249 250 if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) { 251 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP; 252 253 } else { 254 /* Enable the port */ 255 port_ops->set_adm_state(p_adapter_info, internals->n_intf_no, true); 256 257 /* 258 * wait for link on port 259 * If application starts sending too soon before FPGA port is ready, garbage is 260 * produced 261 */ 262 int loop = 0; 263 264 while (port_ops->get_link_status(p_adapter_info, n_intf_no) == RTE_ETH_LINK_DOWN) { 265 /* break out after 5 sec */ 266 if (++loop >= 50) { 267 NT_LOG_DBGX(DBG, NTNIC, 268 "TIMEOUT No link on port %i (5sec timeout)", 269 internals->n_intf_no); 270 break; 271 } 272 273 nt_os_wait_usec(100 * 1000); 274 } 275 276 if (internals->lpbk_mode) { 277 if (internals->lpbk_mode & 1 << 0) { 278 port_ops->set_loopback_mode(p_adapter_info, n_intf_no, 279 NT_LINK_LOOPBACK_HOST); 280 } 281 282 if (internals->lpbk_mode & 1 << 1) { 283 port_ops->set_loopback_mode(p_adapter_info, n_intf_no, 284 NT_LINK_LOOPBACK_LINE); 285 } 286 } 287 } 288 289 return 0; 290 } 291 292 static int 293 eth_dev_stop(struct rte_eth_dev *eth_dev) 294 { 295 struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; 296 297 NT_LOG_DBGX(DBG, NTNIC, "Port %u", internals->n_intf_no); 298 299 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; 300 return 0; 301 } 302 303 static int 304 eth_dev_set_link_up(struct rte_eth_dev *eth_dev) 305 { 306 const struct port_ops *port_ops = get_port_ops(); 307 308 if (port_ops == NULL) { 309 NT_LOG(ERR, NTNIC, "Link management module uninitialized"); 310 return -1; 311 } 312 313 struct pmd_internals *const internals = (struct pmd_internals *)eth_dev->data->dev_private; 314 315 struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; 316 const int port = internals->n_intf_no; 317 318 if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) 319 return 0; 320 321 assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX); 322 assert(port == internals->n_intf_no); 323 324 port_ops->set_adm_state(p_adapter_info, port, true); 325 326 return 0; 327 } 328 329 static int 330 eth_dev_set_link_down(struct rte_eth_dev *eth_dev) 331 { 332 const struct port_ops *port_ops = get_port_ops(); 333 334 if (port_ops == NULL) { 335 NT_LOG(ERR, NTNIC, "Link management module uninitialized"); 336 return -1; 337 } 338 339 struct pmd_internals *const internals = (struct pmd_internals *)eth_dev->data->dev_private; 340 341 struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; 342 const int port = internals->n_intf_no; 343 344 if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) 345 return 0; 346 347 assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX); 348 assert(port == internals->n_intf_no); 349 350 port_ops->set_link_status(p_adapter_info, port, false); 351 352 return 0; 353 } 354 355 static void 356 drv_deinit(struct drv_s *p_drv) 357 { 358 const struct adapter_ops *adapter_ops = get_adapter_ops(); 359 360 if (adapter_ops == NULL) { 361 NT_LOG(ERR, NTNIC, "Adapter module uninitialized"); 362 return; 363 } 364 365 if (p_drv == NULL) 366 return; 367 368 ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; 369 370 /* stop adapter */ 371 adapter_ops->deinit(&p_nt_drv->adapter_info); 372 373 /* clean memory */ 374 rte_free(p_drv); 375 p_drv = NULL; 376 } 377 378 static int 379 eth_dev_close(struct rte_eth_dev *eth_dev) 380 { 381 struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; 382 struct drv_s *p_drv = internals->p_drv; 383 384 internals->p_drv = NULL; 385 386 if (p_drv) { 387 /* decrease initialized ethernet devices */ 388 p_drv->n_eth_dev_init_count--; 389 390 /* 391 * rte_pci_dev has no private member for p_drv 392 * wait until all rte_eth_dev's are closed - then close adapters via p_drv 393 */ 394 if (!p_drv->n_eth_dev_init_count) 395 drv_deinit(p_drv); 396 } 397 398 return 0; 399 } 400 401 static int 402 eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, size_t fw_size) 403 { 404 struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; 405 406 if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) 407 return 0; 408 409 fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info; 410 const int length = snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d", 411 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id, 412 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id); 413 414 if ((size_t)length < fw_size) { 415 /* We have space for the version string */ 416 return 0; 417 418 } else { 419 /* We do not have space for the version string -return the needed space */ 420 return length + 1; 421 } 422 } 423 424 static int 425 promiscuous_enable(struct rte_eth_dev __rte_unused(*dev)) 426 { 427 NT_LOG(DBG, NTHW, "The device always run promiscuous mode"); 428 return 0; 429 } 430 431 static const struct eth_dev_ops nthw_eth_dev_ops = { 432 .dev_configure = eth_dev_configure, 433 .dev_start = eth_dev_start, 434 .dev_stop = eth_dev_stop, 435 .dev_set_link_up = eth_dev_set_link_up, 436 .dev_set_link_down = eth_dev_set_link_down, 437 .dev_close = eth_dev_close, 438 .link_update = eth_link_update, 439 .dev_infos_get = eth_dev_infos_get, 440 .fw_version_get = eth_fw_version_get, 441 .mac_addr_add = eth_mac_addr_add, 442 .mac_addr_set = eth_mac_addr_set, 443 .set_mc_addr_list = eth_set_mc_addr_list, 444 .promiscuous_enable = promiscuous_enable, 445 }; 446 447 static int 448 nthw_pci_dev_init(struct rte_pci_device *pci_dev) 449 { 450 nt_vfio_init(); 451 const struct port_ops *port_ops = get_port_ops(); 452 453 if (port_ops == NULL) { 454 NT_LOG(ERR, NTNIC, "Link management module uninitialized"); 455 return -1; 456 } 457 458 const struct adapter_ops *adapter_ops = get_adapter_ops(); 459 460 if (adapter_ops == NULL) { 461 NT_LOG(ERR, NTNIC, "Adapter module uninitialized"); 462 return -1; 463 } 464 465 struct drv_s *p_drv; 466 ntdrv_4ga_t *p_nt_drv; 467 hw_info_t *p_hw_info; 468 fpga_info_t *fpga_info; 469 uint32_t n_port_mask = -1; /* All ports enabled by default */ 470 uint32_t nb_rx_queues = 1; 471 uint32_t nb_tx_queues = 1; 472 int n_phy_ports; 473 struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 }; 474 int num_port_speeds = 0; 475 NT_LOG_DBGX(DBG, NTNIC, "Dev %s PF #%i Init : %02x:%02x:%i", pci_dev->name, 476 pci_dev->addr.function, pci_dev->addr.bus, pci_dev->addr.devid, 477 pci_dev->addr.function); 478 479 480 /* alloc */ 481 p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s), RTE_CACHE_LINE_SIZE, 482 pci_dev->device.numa_node); 483 484 if (!p_drv) { 485 NT_LOG_DBGX(ERR, NTNIC, "%s: error %d", 486 (pci_dev->name[0] ? pci_dev->name : "NA"), -1); 487 return -1; 488 } 489 490 /* Setup VFIO context */ 491 int vfio = nt_vfio_setup(pci_dev); 492 493 if (vfio < 0) { 494 NT_LOG_DBGX(ERR, NTNIC, "%s: vfio_setup error %d", 495 (pci_dev->name[0] ? pci_dev->name : "NA"), -1); 496 rte_free(p_drv); 497 return -1; 498 } 499 500 /* context */ 501 p_nt_drv = &p_drv->ntdrv; 502 p_hw_info = &p_nt_drv->adapter_info.hw_info; 503 fpga_info = &p_nt_drv->adapter_info.fpga_info; 504 505 p_drv->p_dev = pci_dev; 506 507 /* Set context for NtDrv */ 508 p_nt_drv->pciident = BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus, 509 pci_dev->addr.devid, pci_dev->addr.function); 510 p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues; 511 p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues; 512 513 fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr; 514 fpga_info->bar0_size = pci_dev->mem_resource[0].len; 515 fpga_info->numa_node = pci_dev->device.numa_node; 516 fpga_info->pciident = p_nt_drv->pciident; 517 fpga_info->adapter_no = p_drv->adapter_no; 518 519 p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id; 520 p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id; 521 p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id; 522 p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id = pci_dev->id.subsystem_vendor_id; 523 p_nt_drv->adapter_info.hw_info.pci_sub_device_id = pci_dev->id.subsystem_device_id; 524 525 NT_LOG(DBG, NTNIC, "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:", 526 p_nt_drv->adapter_info.mp_adapter_id_str, PCIIDENT_TO_DOMAIN(p_nt_drv->pciident), 527 PCIIDENT_TO_BUSNR(p_nt_drv->pciident), PCIIDENT_TO_DEVNR(p_nt_drv->pciident), 528 PCIIDENT_TO_FUNCNR(p_nt_drv->pciident), 529 p_nt_drv->adapter_info.hw_info.pci_vendor_id, 530 p_nt_drv->adapter_info.hw_info.pci_device_id, 531 p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id, 532 p_nt_drv->adapter_info.hw_info.pci_sub_device_id); 533 534 p_nt_drv->b_shutdown = false; 535 p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown; 536 537 for (int i = 0; i < num_port_speeds; ++i) { 538 struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info; 539 nt_link_speed_t link_speed = convert_link_speed(pls_mbps[i].link_speed); 540 port_ops->set_link_speed(p_adapter_info, i, link_speed); 541 } 542 543 /* store context */ 544 store_pdrv(p_drv); 545 546 /* initialize nt4ga nthw fpga module instance in drv */ 547 int err = adapter_ops->init(&p_nt_drv->adapter_info); 548 549 if (err != 0) { 550 NT_LOG(ERR, NTNIC, "%s: Cannot initialize the adapter instance", 551 p_nt_drv->adapter_info.mp_adapter_id_str); 552 return -1; 553 } 554 555 /* Start ctrl, monitor, stat thread only for primary process. */ 556 if (err == 0) { 557 /* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */ 558 const char *const p_adapter_id_str = p_nt_drv->adapter_info.mp_adapter_id_str; 559 (void)p_adapter_id_str; 560 NT_LOG(DBG, NTNIC, 561 "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR " Hw=0x%02X_rev%d PhyPorts=%d", 562 (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str, 563 PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident), 564 PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident), 565 PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident), 566 PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident), 567 p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id, 568 fpga_info->n_phy_ports); 569 570 } else { 571 NT_LOG_DBGX(ERR, NTNIC, "%s: error=%d", 572 (pci_dev->name[0] ? pci_dev->name : "NA"), err); 573 return -1; 574 } 575 576 n_phy_ports = fpga_info->n_phy_ports; 577 578 for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) { 579 const char *const p_port_id_str = p_nt_drv->adapter_info.mp_port_id_str[n_intf_no]; 580 (void)p_port_id_str; 581 struct pmd_internals *internals = NULL; 582 struct rte_eth_dev *eth_dev = NULL; 583 char name[32]; 584 585 if ((1 << n_intf_no) & ~n_port_mask) { 586 NT_LOG_DBGX(DBG, NTNIC, 587 "%s: interface #%d: skipping due to portmask 0x%02X", 588 p_port_id_str, n_intf_no, n_port_mask); 589 continue; 590 } 591 592 snprintf(name, sizeof(name), "ntnic%d", n_intf_no); 593 NT_LOG_DBGX(DBG, NTNIC, "%s: interface #%d: %s: '%s'", p_port_id_str, 594 n_intf_no, (pci_dev->name[0] ? pci_dev->name : "NA"), name); 595 596 internals = rte_zmalloc_socket(name, sizeof(struct pmd_internals), 597 RTE_CACHE_LINE_SIZE, pci_dev->device.numa_node); 598 599 if (!internals) { 600 NT_LOG_DBGX(ERR, NTNIC, "%s: %s: error=%d", 601 (pci_dev->name[0] ? pci_dev->name : "NA"), name, -1); 602 return -1; 603 } 604 605 internals->pci_dev = pci_dev; 606 internals->n_intf_no = n_intf_no; 607 internals->type = PORT_TYPE_PHYSICAL; 608 internals->nb_rx_queues = nb_rx_queues; 609 internals->nb_tx_queues = nb_tx_queues; 610 611 612 /* Setup queue_ids */ 613 if (nb_rx_queues > 1) { 614 NT_LOG(DBG, NTNIC, 615 "(%i) NTNIC configured with Rx multi queues. %i queues", 616 internals->n_intf_no, nb_rx_queues); 617 } 618 619 if (nb_tx_queues > 1) { 620 NT_LOG(DBG, NTNIC, 621 "(%i) NTNIC configured with Tx multi queues. %i queues", 622 internals->n_intf_no, nb_tx_queues); 623 } 624 625 /* Set MAC address (but only if the MAC address is permitted) */ 626 if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) { 627 const uint64_t mac = 628 fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + n_intf_no; 629 internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) & 0xFFu; 630 internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) & 0xFFu; 631 internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) & 0xFFu; 632 internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) & 0xFFu; 633 internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) & 0xFFu; 634 internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) & 0xFFu; 635 } 636 637 eth_dev = rte_eth_dev_allocate(name); 638 639 if (!eth_dev) { 640 NT_LOG_DBGX(ERR, NTNIC, "%s: %s: error=%d", 641 (pci_dev->name[0] ? pci_dev->name : "NA"), name, -1); 642 return -1; 643 } 644 645 /* connect structs */ 646 internals->p_drv = p_drv; 647 eth_dev->data->dev_private = internals; 648 eth_dev->data->mac_addrs = rte_malloc(NULL, 649 NUM_MAC_ADDRS_PER_PORT * sizeof(struct rte_ether_addr), 0); 650 rte_memcpy(ð_dev->data->mac_addrs[0], 651 &internals->eth_addrs[0], RTE_ETHER_ADDR_LEN); 652 653 654 struct rte_eth_link pmd_link; 655 pmd_link.link_speed = RTE_ETH_SPEED_NUM_NONE; 656 pmd_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 657 pmd_link.link_status = RTE_ETH_LINK_DOWN; 658 pmd_link.link_autoneg = RTE_ETH_LINK_AUTONEG; 659 660 eth_dev->device = &pci_dev->device; 661 eth_dev->data->dev_link = pmd_link; 662 eth_dev->dev_ops = &nthw_eth_dev_ops; 663 664 eth_dev_pci_specific_init(eth_dev, pci_dev); 665 rte_eth_dev_probing_finish(eth_dev); 666 667 /* increase initialized ethernet devices - PF */ 668 p_drv->n_eth_dev_init_count++; 669 } 670 671 return 0; 672 } 673 674 static int 675 nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused) 676 { 677 NT_LOG_DBGX(DBG, NTNIC, "PCI device deinitialization"); 678 679 int i; 680 char name[32]; 681 682 struct pmd_internals *internals = eth_dev->data->dev_private; 683 ntdrv_4ga_t *p_ntdrv = &internals->p_drv->ntdrv; 684 fpga_info_t *fpga_info = &p_ntdrv->adapter_info.fpga_info; 685 const int n_phy_ports = fpga_info->n_phy_ports; 686 for (i = 0; i < n_phy_ports; i++) { 687 sprintf(name, "ntnic%d", i); 688 eth_dev = rte_eth_dev_allocated(name); 689 if (eth_dev == NULL) 690 continue; /* port already released */ 691 rte_eth_dev_release_port(eth_dev); 692 } 693 694 nt_vfio_remove(EXCEPTION_PATH_HID); 695 return 0; 696 } 697 698 static int 699 nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 700 struct rte_pci_device *pci_dev) 701 { 702 int ret; 703 704 NT_LOG_DBGX(DBG, NTNIC, "pcidev: name: '%s'", pci_dev->name); 705 NT_LOG_DBGX(DBG, NTNIC, "devargs: name: '%s'", pci_dev->device.name); 706 707 if (pci_dev->device.devargs) { 708 NT_LOG_DBGX(DBG, NTNIC, "devargs: args: '%s'", 709 (pci_dev->device.devargs->args ? pci_dev->device.devargs->args : "NULL")); 710 NT_LOG_DBGX(DBG, NTNIC, "devargs: data: '%s'", 711 (pci_dev->device.devargs->data ? pci_dev->device.devargs->data : "NULL")); 712 } 713 714 const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled(); 715 NT_LOG(DBG, NTNIC, "vfio_no_iommu_enabled=%d", n_rte_vfio_no_io_mmu_enabled); 716 717 if (n_rte_vfio_no_io_mmu_enabled) { 718 NT_LOG(ERR, NTNIC, "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU", 719 n_rte_vfio_no_io_mmu_enabled); 720 return -1; 721 } 722 723 const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode(); 724 NT_LOG(DBG, NTNIC, "iova mode=%d", n_rte_io_va_mode); 725 726 NT_LOG(DBG, NTNIC, 727 "busid=" PCI_PRI_FMT 728 " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s", 729 pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, 730 pci_dev->addr.function, pci_dev->id.vendor_id, pci_dev->id.device_id, 731 pci_dev->id.subsystem_vendor_id, pci_dev->id.subsystem_device_id, 732 pci_dev->name[0] ? pci_dev->name : "NA", 733 pci_dev->device.numa_node, 734 pci_dev->driver->driver.name ? pci_dev->driver->driver.name : "NA", 735 pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias : "NA"); 736 737 738 ret = nthw_pci_dev_init(pci_dev); 739 740 NT_LOG_DBGX(DBG, NTNIC, "leave: ret=%d", ret); 741 return ret; 742 } 743 744 static int 745 nthw_pci_remove(struct rte_pci_device *pci_dev) 746 { 747 NT_LOG_DBGX(DBG, NTNIC); 748 749 struct drv_s *p_drv = get_pdrv_from_pci(pci_dev->addr); 750 drv_deinit(p_drv); 751 752 return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit); 753 } 754 755 static struct rte_pci_driver rte_nthw_pmd = { 756 .id_table = nthw_pci_id_map, 757 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 758 .probe = nthw_pci_probe, 759 .remove = nthw_pci_remove, 760 }; 761 762 RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd); 763 RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map); 764 RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci"); 765 766 RTE_LOG_REGISTER_SUFFIX(nt_log_general, general, INFO); 767 RTE_LOG_REGISTER_SUFFIX(nt_log_nthw, nthw, INFO); 768 RTE_LOG_REGISTER_SUFFIX(nt_log_filter, filter, INFO); 769 RTE_LOG_REGISTER_SUFFIX(nt_log_ntnic, ntnic, INFO); 770