/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2018 Atomic Rules LLC
 */

#include <unistd.h>
#include <sys/stat.h>
#include <dlfcn.h>

#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>

#include "ark_global.h"
#include "ark_logs.h"
#include "ark_ethdev_tx.h"
#include "ark_ethdev_rx.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_udm.h"
#include "ark_rqp.h"
#include "ark_pktdir.h"
#include "ark_pktgen.h"
#include "ark_pktchkr.h"

/* Internal prototypes */
static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
static int eth_ark_dev_init(struct rte_eth_dev *dev);
static int ark_config_device(struct rte_eth_dev *dev);
static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
static int eth_ark_dev_configure(struct rte_eth_dev *dev);
static int eth_ark_dev_start(struct rte_eth_dev *dev);
static int eth_ark_dev_stop(struct rte_eth_dev *dev);
static int eth_ark_dev_close(struct rte_eth_dev *dev);
static int eth_ark_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
					struct rte_ether_addr *mac_addr);
static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
			       struct rte_ether_addr *mac_addr,
			       uint32_t index,
			       uint32_t pool);
static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
				   uint32_t index);
static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);

/*
 * The packet generator is a functional block used to generate packet
 * patterns for testing. It is not intended for nominal use.
 */
#define ARK_PKTGEN_ARG "Pkt_gen"

/*
 * The packet checker is a functional block used to verify packet
 * patterns for testing. It is not intended for nominal use.
 */
#define ARK_PKTCHKR_ARG "Pkt_chkr"

/*
 * The packet director is used to select the internal ingress and
 * egress packet paths during testing. It is not intended for
 * nominal use.
 */
#define ARK_PKTDIR_ARG "Pkt_dir"
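/*
 * Illustrative example (not part of the driver): the three test blocks
 * above are selected through devargs, e.g. when launching testpmd:
 *
 *   dpdk-testpmd -a 0000:01:00.0,Pkt_dir=0x3,Pkt_gen=/tmp/pg.conf -- -i
 *
 * The PCI address, bitmap value and file path are hypothetical; the key
 * names match the macros above and the parameter string registered at
 * the bottom of this file.
 */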
/* Devinfo configurations */
#define ARK_RX_MAX_QUEUE (4096 * 4)
#define ARK_RX_MIN_QUEUE (512)
#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
#define ARK_RX_MIN_BUFSIZE (1024)

#define ARK_TX_MAX_QUEUE (4096 * 4)
#define ARK_TX_MIN_QUEUE (256)

static const char * const valid_arguments[] = {
	ARK_PKTGEN_ARG,
	ARK_PKTCHKR_ARG,
	ARK_PKTDIR_ARG,
	NULL
};

#define AR_VENDOR_ID 0x1d6c
static const struct rte_pci_id pci_id_ark_map[] = {
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100d)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100e)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100f)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1010)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1017)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1018)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1019)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101e)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101f)},
	{.vendor_id = 0, /* sentinel */ },
};

/*
 * This structure is used to statically define the capabilities
 * of supported devices.
 * Capabilities:
 *   rqpacing -
 *     Some HW variants require that PCIe read-requests be correctly
 *     throttled. This is called "rqpacing" and has to do with credit
 *     and flow control on certain Arkville implementations.
 */
struct ark_caps {
	bool rqpacing;
};
struct ark_dev_caps {
	uint32_t device_id;
	struct ark_caps caps;
};
#define SET_DEV_CAPS(id, rqp) \
	{id, {.rqpacing = rqp} }

static const struct ark_dev_caps
ark_device_caps[] = {
	SET_DEV_CAPS(0x100d, true),
	SET_DEV_CAPS(0x100e, true),
	SET_DEV_CAPS(0x100f, true),
	SET_DEV_CAPS(0x1010, false),
	SET_DEV_CAPS(0x1017, true),
	SET_DEV_CAPS(0x1018, true),
	SET_DEV_CAPS(0x1019, true),
	SET_DEV_CAPS(0x101e, false),
	SET_DEV_CAPS(0x101f, false),
	{.device_id = 0,}
};

static int
eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));

	if (eth_dev == NULL)
		return -ENOMEM;

	ret = eth_ark_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_release_port(eth_dev);

	return ret;
}

static int
eth_ark_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
}

static struct rte_pci_driver rte_ark_pmd = {
	.id_table = pci_id_ark_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ark_pci_probe,
	.remove = eth_ark_pci_remove,
};

static const struct eth_dev_ops ark_eth_dev_ops = {
	.dev_configure = eth_ark_dev_configure,
	.dev_start = eth_ark_dev_start,
	.dev_stop = eth_ark_dev_stop,
	.dev_close = eth_ark_dev_close,

	.dev_infos_get = eth_ark_dev_info_get,

	.rx_queue_setup = eth_ark_dev_rx_queue_setup,
	.tx_queue_setup = eth_ark_tx_queue_setup,

	.link_update = eth_ark_dev_link_update,
	.dev_set_link_up = eth_ark_dev_set_link_up,
	.dev_set_link_down = eth_ark_dev_set_link_down,

	.rx_queue_start = eth_ark_rx_start_queue,
	.rx_queue_stop = eth_ark_rx_stop_queue,

	.tx_queue_start = eth_ark_tx_queue_start,
	.tx_queue_stop = eth_ark_tx_queue_stop,

	.stats_get = eth_ark_dev_stats_get,
	.stats_reset = eth_ark_dev_stats_reset,

	.mac_addr_add = eth_ark_macaddr_add,
	.mac_addr_remove = eth_ark_macaddr_remove,
	.mac_addr_set = eth_ark_set_default_mac_addr,

	.mtu_set = eth_ark_set_mtu,
};
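/*
 * check_for_ext() below loads an optional user extension: a shared
 * object named by the ARK_EXT_PATH environment variable, from which a
 * fixed set of hook symbols is resolved. A minimal sketch of one hook,
 * assuming a separately built extension (the helper and state names are
 * illustrative; the signature matches the cast used below):
 *
 *	void *
 *	rte_pmd_ark_dev_init(struct rte_eth_dev *dev, void *a_bar, int port)
 *	{
 *		struct my_ext_state *s = my_alloc_state(dev, a_bar, port);
 *		return s;	// cookie passed back to every later hook
 *	}
 *
 * Every symbol is optional: dlsym() returns NULL for hooks the extension
 * does not provide, and each call site checks the pointer first.
 */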
static int
check_for_ext(struct ark_adapter *ark)
{
	int found = 0;

	/* Get the env */
	const char *dllpath = getenv("ARK_EXT_PATH");

	if (dllpath == NULL) {
		ARK_PMD_LOG(DEBUG, "EXT NO dll path specified\n");
		return 0;
	}
	ARK_PMD_LOG(NOTICE, "EXT found dll path at %s\n", dllpath);

	/* Open and load the .so */
	ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
	if (ark->d_handle == NULL) {
		ARK_PMD_LOG(ERR, "Could not load user extension %s\n",
			    dllpath);
		return -1;
	}
	ARK_PMD_LOG(DEBUG, "SUCCESS: loaded user extension %s\n",
		    dllpath);

	/* Get the entry points */
	ark->user_ext.dev_init =
		(void *(*)(struct rte_eth_dev *, void *, int))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_init");
	ARK_PMD_LOG(DEBUG, "device ext init pointer = %p\n",
		    ark->user_ext.dev_init);
	ark->user_ext.dev_get_port_count =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_get_port_count");
	ark->user_ext.dev_uninit =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_uninit");
	ark->user_ext.dev_configure =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_configure");
	ark->user_ext.dev_start =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_start");
	ark->user_ext.dev_stop =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_stop");
	ark->user_ext.dev_close =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_close");
	ark->user_ext.link_update =
		(int (*)(struct rte_eth_dev *, int, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_link_update");
	ark->user_ext.dev_set_link_up =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_set_link_up");
	ark->user_ext.dev_set_link_down =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_set_link_down");
	ark->user_ext.stats_get =
		(int (*)(struct rte_eth_dev *, struct rte_eth_stats *,
			 void *))
		dlsym(ark->d_handle, "rte_pmd_ark_stats_get");
	ark->user_ext.stats_reset =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_stats_reset");
	ark->user_ext.mac_addr_add =
		(void (*)(struct rte_eth_dev *, struct rte_ether_addr *,
			  uint32_t, uint32_t, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_mac_addr_add");
	ark->user_ext.mac_addr_remove =
		(void (*)(struct rte_eth_dev *, uint32_t, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_mac_addr_remove");
	ark->user_ext.mac_addr_set =
		(void (*)(struct rte_eth_dev *, struct rte_ether_addr *,
			  void *))
		dlsym(ark->d_handle, "rte_pmd_ark_mac_addr_set");
	ark->user_ext.set_mtu =
		(int (*)(struct rte_eth_dev *, uint16_t,
			 void *))
		dlsym(ark->d_handle, "rte_pmd_ark_set_mtu");
	ark->user_ext.rx_user_meta_hook =
		(rx_user_meta_hook_fn)dlsym(ark->d_handle,
					    "rte_pmd_ark_rx_user_meta_hook");
	ark->user_ext.tx_user_meta_hook =
		(tx_user_meta_hook_fn)dlsym(ark->d_handle,
					    "rte_pmd_ark_tx_user_meta_hook");

	return found;
}
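/*
 * Illustrative use of the loader above (the library path is
 * hypothetical):
 *
 *   ARK_EXT_PATH=/usr/local/lib/libark_user_ext.so dpdk-testpmd ...
 */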
static int
eth_ark_dev_init(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	int ret;
	int port_count = 1;
	int p;
	bool rqpacing = false;

	ark->eth_dev = dev;

	ARK_PMD_LOG(DEBUG, "\n");

	/* Check to see if there is an extension that we need to load */
	ret = check_for_ext(ark);
	if (ret)
		return ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	p = 0;
	while (ark_device_caps[p].device_id != 0) {
		if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
			rqpacing = ark_device_caps[p].caps.rqpacing;
			break;
		}
		p++;
	}

	/* Use dummy function until setup */
	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;

	ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
	ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;

	ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
	ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
	ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
	ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
	ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
	ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
	ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
	ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
	ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
	ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];

	if (rqpacing) {
		ark->rqpacing =
			(struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
	} else {
		ark->rqpacing = NULL;
	}
	ark->started = 0;
	ark->pkt_dir_v = ARK_PKT_DIR_INIT_VAL;

	ARK_PMD_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
		    ark->sysctrl.t32[4],
		    rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
	ARK_PMD_LOG(NOTICE, "Arkville HW Commit_ID: %08x\n",
		    rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));

	/* If HW sanity test fails, return an error */
	if (ark->sysctrl.t32[4] != 0xcafef00d) {
		ARK_PMD_LOG(ERR,
			    "HW sanity test has failed, expected constant"
			    " 0x%x, read 0x%x (%s)\n",
			    0xcafef00d,
			    ark->sysctrl.t32[4], __func__);
		return -1;
	}
	if (ark->sysctrl.t32[3] != 0) {
		if (ark->rqpacing) {
			if (ark_rqp_lasped(ark->rqpacing)) {
				ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
					    "Timer has Expired\n");
				return -1;
			}
			ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
				    "Timer is Running\n");
		}
	}

	ARK_PMD_LOG(DEBUG,
		    "HW sanity test has PASSED, expected constant"
		    " 0x%x, read 0x%x (%s)\n",
		    0xcafef00d, ark->sysctrl.t32[4], __func__);

	/* We are a single function multi-port device. */
	ret = ark_config_device(dev);
	if (ret)
		return -1;

	dev->dev_ops = &ark_eth_dev_ops;
	dev->rx_queue_count = eth_ark_dev_rx_queue_count;

	dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0);
	if (!dev->data->mac_addrs) {
		ARK_PMD_LOG(ERR,
			    "Failed to allocate memory for storing MAC address\n");
		return -ENOMEM;
	}

	if (ark->user_ext.dev_init) {
		ark->user_data[dev->data->port_id] =
			ark->user_ext.dev_init(dev, ark->a_bar, 0);
		if (!ark->user_data[dev->data->port_id]) {
			ARK_PMD_LOG(WARNING,
				    "Failed to initialize PMD extension,"
				    " continuing without it\n");
			memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
			dlclose(ark->d_handle);
		}
	}

	if (pci_dev->device.devargs)
		ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
	else
		ARK_PMD_LOG(INFO, "No Device args found\n");

	if (ret)
		goto error;
	/*
	 * We will create additional devices based on the number of requested
	 * ports
	 */
	if (ark->user_ext.dev_get_port_count)
		port_count =
			ark->user_ext.dev_get_port_count(dev,
				ark->user_data[dev->data->port_id]);
	ark->num_ports = port_count;

	for (p = 0; p < port_count; p++) {
		struct rte_eth_dev *eth_dev;
		char name[RTE_ETH_NAME_MAX_LEN];

		snprintf(name, sizeof(name), "arketh%d",
			 dev->data->port_id + p);

		if (p == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = ark->eth_dev;
			rte_eth_dev_probing_finish(eth_dev);
			continue;
		}

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			ARK_PMD_LOG(ERR,
				    "Could not allocate eth_dev for port %d\n",
				    p);
			goto error;
		}

		eth_dev->device = &pci_dev->device;
		eth_dev->data->dev_private = ark;
		eth_dev->dev_ops = ark->eth_dev->dev_ops;
		eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
		eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

		eth_dev->data->mac_addrs = rte_zmalloc(name,
						       RTE_ETHER_ADDR_LEN, 0);
		if (!eth_dev->data->mac_addrs) {
			ARK_PMD_LOG(ERR,
				    "Memory allocation for MAC failed!"
				    " Exiting.\n");
			goto error;
		}

		if (ark->user_ext.dev_init) {
			ark->user_data[eth_dev->data->port_id] =
				ark->user_ext.dev_init(dev, ark->a_bar, p);
		}

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;

error:
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
	return -1;
}
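/*
 * Note: every port created by eth_ark_dev_init() shares one ark_adapter
 * as dev_private, along with port 0's dev_ops and burst functions; only
 * the MAC address storage and the per-port user-extension cookie are
 * allocated per port.
 */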
399 " continuing without it\n"); 400 memset(&ark->user_ext, 0, sizeof(struct ark_user_ext)); 401 dlclose(ark->d_handle); 402 } 403 } 404 405 if (pci_dev->device.devargs) 406 ret = eth_ark_check_args(ark, pci_dev->device.devargs->args); 407 else 408 ARK_PMD_LOG(INFO, "No Device args found\n"); 409 410 if (ret) 411 goto error; 412 /* 413 * We will create additional devices based on the number of requested 414 * ports 415 */ 416 if (ark->user_ext.dev_get_port_count) 417 port_count = 418 ark->user_ext.dev_get_port_count(dev, 419 ark->user_data[dev->data->port_id]); 420 ark->num_ports = port_count; 421 422 for (p = 0; p < port_count; p++) { 423 struct rte_eth_dev *eth_dev; 424 char name[RTE_ETH_NAME_MAX_LEN]; 425 426 snprintf(name, sizeof(name), "arketh%d", 427 dev->data->port_id + p); 428 429 if (p == 0) { 430 /* First port is already allocated by DPDK */ 431 eth_dev = ark->eth_dev; 432 rte_eth_dev_probing_finish(eth_dev); 433 continue; 434 } 435 436 /* reserve an ethdev entry */ 437 eth_dev = rte_eth_dev_allocate(name); 438 if (!eth_dev) { 439 ARK_PMD_LOG(ERR, 440 "Could not allocate eth_dev for port %d\n", 441 p); 442 goto error; 443 } 444 445 eth_dev->device = &pci_dev->device; 446 eth_dev->data->dev_private = ark; 447 eth_dev->dev_ops = ark->eth_dev->dev_ops; 448 eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst; 449 eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst; 450 451 rte_eth_copy_pci_info(eth_dev, pci_dev); 452 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 453 454 eth_dev->data->mac_addrs = rte_zmalloc(name, 455 RTE_ETHER_ADDR_LEN, 0); 456 if (!eth_dev->data->mac_addrs) { 457 ARK_PMD_LOG(ERR, 458 "Memory allocation for MAC failed!" 459 " Exiting.\n"); 460 goto error; 461 } 462 463 if (ark->user_ext.dev_init) { 464 ark->user_data[eth_dev->data->port_id] = 465 ark->user_ext.dev_init(dev, ark->a_bar, p); 466 } 467 468 rte_eth_dev_probing_finish(eth_dev); 469 } 470 471 return ret; 472 473 error: 474 rte_free(dev->data->mac_addrs); 475 dev->data->mac_addrs = NULL; 476 return -1; 477 } 478 479 /* 480 *Initial device configuration when device is opened 481 * setup the DDM, and UDM 482 * Called once per PCIE device 483 */ 484 static int 485 ark_config_device(struct rte_eth_dev *dev) 486 { 487 struct ark_adapter *ark = dev->data->dev_private; 488 uint16_t num_q, i; 489 struct ark_mpu_t *mpu; 490 491 /* 492 * Make sure that the packet director, generator and checker are in a 493 * known state 494 */ 495 ark->start_pg = 0; 496 ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1); 497 if (ark->pg == NULL) 498 return -1; 499 ark_pktgen_reset(ark->pg); 500 ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1); 501 if (ark->pc == NULL) 502 return -1; 503 ark_pktchkr_stop(ark->pc); 504 ark->pd = ark_pktdir_init(ark->pktdir.v); 505 if (ark->pd == NULL) 506 return -1; 507 508 /* Verify HW */ 509 if (ark_udm_verify(ark->udm.v)) 510 return -1; 511 if (ark_ddm_verify(ark->ddm.v)) 512 return -1; 513 514 /* UDM */ 515 if (ark_udm_reset(ark->udm.v)) { 516 ARK_PMD_LOG(ERR, "Unable to stop and reset UDM\n"); 517 return -1; 518 } 519 /* Keep in reset until the MPU are cleared */ 520 521 /* MPU reset */ 522 mpu = ark->mpurx.v; 523 num_q = ark_api_num_queues(mpu); 524 ark->rx_queues = num_q; 525 for (i = 0; i < num_q; i++) { 526 ark_mpu_reset(mpu); 527 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 528 } 529 530 /* TX -- DDM */ 531 if (ark_ddm_stop(ark->ddm.v, 1)) 532 ARK_PMD_LOG(ERR, "Unable to stop DDM\n"); 533 534 mpu = ark->mputx.v; 535 num_q = ark_api_num_queues(mpu); 536 ark->tx_queues = num_q; 537 for 
static int
eth_ark_dev_uninit(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (ark->user_ext.dev_uninit)
		ark->user_ext.dev_uninit(dev,
			ark->user_data[dev->data->port_id]);

	ark_pktgen_uninit(ark->pg);
	ark_pktchkr_uninit(ark->pc);

	return 0;
}

static int
eth_ark_dev_configure(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;

	eth_ark_dev_set_link_up(dev);
	if (ark->user_ext.dev_configure)
		return ark->user_ext.dev_configure(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_start(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	int i;

	/* RX Side */
	/* start UDM */
	ark_udm_start(ark->udm.v);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_start_queue(dev, i);

	/* TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_ark_tx_queue_start(dev, i);

	/* start DDM */
	ark_ddm_start(ark->ddm.v);

	ark->started = 1;
	/* set xmit and receive function */
	dev->rx_pkt_burst = &eth_ark_recv_pkts;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts;

	if (ark->start_pg)
		ark_pktchkr_run(ark->pc);

	if (ark->start_pg && (dev->data->port_id == 0)) {
		pthread_t thread;

		/* Delay packet generator start to allow the hardware to be
		 * ready. This is only used for sanity checking with the
		 * internal generator.
		 */
		if (rte_ctrl_thread_create(&thread, "ark-delay-pg", NULL,
					   ark_pktgen_delay_start, ark->pg)) {
			ARK_PMD_LOG(ERR, "Could not create pktgen "
				    "starter thread\n");
			return -1;
		}
	}

	if (ark->user_ext.dev_start)
		ark->user_ext.dev_start(dev,
			ark->user_data[dev->data->port_id]);

	return 0;
}

static int
eth_ark_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;
	int status;
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_mpu_t *mpu;

	if (ark->started == 0)
		return 0;
	ark->started = 0;
	dev->data->dev_started = 0;

	/* Stop the extension first */
	if (ark->user_ext.dev_stop)
		ark->user_ext.dev_stop(dev,
			ark->user_data[dev->data->port_id]);

	/* Stop the packet generator */
	if (ark->start_pg)
		ark_pktgen_pause(ark->pg);

	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;

	/* STOP TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		status = eth_ark_tx_queue_stop(dev, i);
		if (status != 0) {
			uint16_t port = dev->data->port_id;
			ARK_PMD_LOG(ERR,
				    "tx_queue stop anomaly"
				    " port %u, queue %u\n",
				    port, i);
		}
	}

	/* Stop DDM */
	/* Wait up to 0.1 second; each stop attempt is up to 1000 * 10 usec */
	for (i = 0; i < 10; i++) {
		status = ark_ddm_stop(ark->ddm.v, 1);
		if (status == 0)
			break;
	}
	if (status || i != 0) {
		ARK_PMD_LOG(ERR, "DDM stop anomaly. status:"
			    " %d iter: %u. (%s)\n",
			    status,
			    i,
			    __func__);
		ark_ddm_dump(ark->ddm.v, "Stop anomaly");

		mpu = ark->mputx.v;
		for (i = 0; i < ark->tx_queues; i++) {
			ark_mpu_dump(mpu, "DDM failure dump", i);
			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
		}
	}

	/* STOP RX Side */
	/* Stop UDM; multiple attempts may be needed */
	for (i = 0; i < 10; i++) {
		status = ark_udm_stop(ark->udm.v, 1);
		if (status == 0)
			break;
	}
	if (status || i != 0) {
		ARK_PMD_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
			    status, i, __func__);
		ark_udm_dump(ark->udm.v, "Stop anomaly");

		mpu = ark->mpurx.v;
		for (i = 0; i < ark->rx_queues; i++) {
			ark_mpu_dump(mpu, "UDM Stop anomaly", i);
			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
		}
	}

	ark_udm_dump_stats(ark->udm.v, "Post stop");
	ark_udm_dump_perf(ark->udm.v, "Post stop");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_dump_queue(dev, i, __func__);

	/* Stop the packet checker if it is running */
	if (ark->start_pg) {
		ark_pktchkr_dump_stats(ark->pc);
		ark_pktchkr_stop(ark->pc);
	}

	return 0;
}
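/*
 * eth_ark_dev_stop() above is idempotent (guarded by ark->started), so
 * eth_ark_dev_close() below may call it unconditionally before releasing
 * the queues.
 */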
(%s)\n", 678 status, 679 i, 680 __func__); 681 ark_ddm_dump(ark->ddm.v, "Stop anomaly"); 682 683 mpu = ark->mputx.v; 684 for (i = 0; i < ark->tx_queues; i++) { 685 ark_mpu_dump(mpu, "DDM failure dump", i); 686 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 687 } 688 } 689 690 /* STOP RX Side */ 691 /* Stop UDM multiple tries attempted */ 692 for (i = 0; i < 10; i++) { 693 status = ark_udm_stop(ark->udm.v, 1); 694 if (status == 0) 695 break; 696 } 697 if (status || i != 0) { 698 ARK_PMD_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n", 699 status, i, __func__); 700 ark_udm_dump(ark->udm.v, "Stop anomaly"); 701 702 mpu = ark->mpurx.v; 703 for (i = 0; i < ark->rx_queues; i++) { 704 ark_mpu_dump(mpu, "UDM Stop anomaly", i); 705 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 706 } 707 } 708 709 ark_udm_dump_stats(ark->udm.v, "Post stop"); 710 ark_udm_dump_perf(ark->udm.v, "Post stop"); 711 712 for (i = 0; i < dev->data->nb_rx_queues; i++) 713 eth_ark_rx_dump_queue(dev, i, __func__); 714 715 /* Stop the packet checker if it is running */ 716 if (ark->start_pg) { 717 ark_pktchkr_dump_stats(ark->pc); 718 ark_pktchkr_stop(ark->pc); 719 } 720 721 return 0; 722 } 723 724 static int 725 eth_ark_dev_close(struct rte_eth_dev *dev) 726 { 727 struct ark_adapter *ark = dev->data->dev_private; 728 uint16_t i; 729 730 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 731 return 0; 732 733 if (ark->user_ext.dev_close) 734 ark->user_ext.dev_close(dev, 735 ark->user_data[dev->data->port_id]); 736 737 eth_ark_dev_stop(dev); 738 eth_ark_udm_force_close(dev); 739 740 /* 741 * TODO This should only be called once for the device during shutdown 742 */ 743 if (ark->rqpacing) 744 ark_rqp_dump(ark->rqpacing); 745 746 for (i = 0; i < dev->data->nb_tx_queues; i++) { 747 eth_ark_tx_queue_release(dev->data->tx_queues[i]); 748 dev->data->tx_queues[i] = 0; 749 } 750 751 for (i = 0; i < dev->data->nb_rx_queues; i++) { 752 eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]); 753 dev->data->rx_queues[i] = 0; 754 } 755 756 return 0; 757 } 758 759 static int 760 eth_ark_dev_info_get(struct rte_eth_dev *dev, 761 struct rte_eth_dev_info *dev_info) 762 { 763 struct ark_adapter *ark = dev->data->dev_private; 764 struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE); 765 struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE); 766 uint16_t ports = ark->num_ports; 767 768 dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN; 769 dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE; 770 771 dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports); 772 dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports); 773 774 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 775 .nb_max = ARK_RX_MAX_QUEUE, 776 .nb_min = ARK_RX_MIN_QUEUE, 777 .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */ 778 779 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 780 .nb_max = ARK_TX_MAX_QUEUE, 781 .nb_min = ARK_TX_MIN_QUEUE, 782 .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */ 783 784 /* ARK PMD supports all line rates, how do we indicate that here ?? 
static int
eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	ARK_PMD_LOG(DEBUG, "link status = %d\n",
		    dev->data->dev_link.link_status);
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.link_update) {
		return ark->user_ext.link_update
			(dev, wait_to_complete,
			 ark->user_data[dev->data->port_id]);
	}
	return 0;
}

static int
eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.dev_set_link_up)
		return ark->user_ext.dev_set_link_up(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.dev_set_link_down)
		return ark->user_ext.dev_set_link_down(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;
	struct ark_adapter *ark = dev->data->dev_private;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;
	stats->imissed = 0;
	stats->oerrors = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
	if (ark->user_ext.stats_get)
		return ark->user_ext.stats_get(dev, stats,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct ark_adapter *ark = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
	if (ark->user_ext.stats_reset)
		ark->user_ext.stats_reset(dev,
			ark->user_data[dev->data->port_id]);

	return 0;
}

static int
eth_ark_macaddr_add(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mac_addr,
		    uint32_t index,
		    uint32_t pool)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.mac_addr_add) {
		ark->user_ext.mac_addr_add(dev,
					   mac_addr,
					   index,
					   pool,
			ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

static void
eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.mac_addr_remove)
		ark->user_ext.mac_addr_remove(dev, index,
			ark->user_data[dev->data->port_id]);
}

static int
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct rte_ether_addr *mac_addr)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.mac_addr_set) {
		ark->user_ext.mac_addr_set(dev, mac_addr,
			ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}
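/*
 * MAC address management and MTU changes have no generic Arkville
 * implementation: the three MAC ops above and set_mtu below simply
 * delegate to the user extension, and the int-returning ops report
 * -ENOTSUP when no hook was loaded.
 */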
static int
eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.set_mtu)
		return ark->user_ext.set_mtu(dev, size,
			ark->user_data[dev->data->port_id]);

	return -ENOTSUP;
}

static inline int
process_pktdir_arg(const char *key, const char *value,
		   void *extra_args)
{
	ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n",
		    key, value);
	struct ark_adapter *ark =
		(struct ark_adapter *)extra_args;

	ark->pkt_dir_v = strtol(value, NULL, 16);
	ARK_PMD_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
	return 0;
}

static inline int
process_file_args(const char *key, const char *value, void *extra_args)
{
	ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n",
		    key, value);
	char *args = (char *)extra_args;

	/* Open the configuration file */
	FILE *file = fopen(value, "r");
	char line[ARK_MAX_ARG_LEN];
	int size = 0;
	int first = 1;

	if (file == NULL) {
		ARK_PMD_LOG(ERR, "Unable to open "
			    "config file %s\n", value);
		return -1;
	}

	while (fgets(line, sizeof(line), file)) {
		size += strlen(line);
		if (size >= ARK_MAX_ARG_LEN) {
			ARK_PMD_LOG(ERR, "Unable to parse file %s args, "
				    "parameter list is too long\n", value);
			fclose(file);
			return -1;
		}
		if (first) {
			strncpy(args, line, ARK_MAX_ARG_LEN);
			first = 0;
		} else {
			strncat(args, line, ARK_MAX_ARG_LEN);
		}
	}
	ARK_PMD_LOG(DEBUG, "file = %s\n", args);
	fclose(file);
	return 0;
}
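/*
 * process_file_args() above flattens a whole configuration file into one
 * argument string: lines are concatenated (newlines included) into a
 * caller-provided buffer of ARK_MAX_ARG_LEN bytes, and the file is
 * rejected once the running total reaches that limit.
 */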
static int
eth_ark_check_args(struct ark_adapter *ark, const char *params)
{
	struct rte_kvargs *kvlist;
	unsigned int k_idx;
	struct rte_kvargs_pair *pair = NULL;
	int ret = -1;

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return 0;

	ark->pkt_gen_args[0] = 0;
	ark->pkt_chkr_args[0] = 0;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		ARK_PMD_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
			    pair->key,
			    pair->value);
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTDIR_ARG,
			       &process_pktdir_arg,
			       ark) != 0) {
		ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTGEN_ARG,
			       &process_file_args,
			       ark->pkt_gen_args) != 0) {
		ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTCHKR_ARG,
			       &process_file_args,
			       ark->pkt_chkr_args) != 0) {
		ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
		goto free_kvlist;
	}

	ARK_PMD_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
	/* Setup the packet director */
	ark_pktdir_setup(ark->pd, ark->pkt_dir_v);

	/* Setup the packet generator */
	if (ark->pkt_gen_args[0]) {
		ARK_PMD_LOG(DEBUG, "Setting up the packet generator\n");
		ark_pktgen_parse(ark->pkt_gen_args);
		ark_pktgen_reset(ark->pg);
		ark_pktgen_setup(ark->pg);
		ark->start_pg = 1;
	}

	/* Setup the packet checker */
	if (ark->pkt_chkr_args[0]) {
		ark_pktchkr_parse(ark->pkt_chkr_args);
		ark_pktchkr_setup(ark->pc);
	}

	ret = 0;

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
RTE_PMD_REGISTER_PARAM_STRING(net_ark,
			      ARK_PKTGEN_ARG "=<filename> "
			      ARK_PKTCHKR_ARG "=<filename> "
			      ARK_PKTDIR_ARG "=<bitmap>");
RTE_LOG_REGISTER_DEFAULT(ark_logtype, NOTICE);