1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2015-2018 Atomic Rules LLC 3 */ 4 5 #include <unistd.h> 6 #include <sys/stat.h> 7 #include <dlfcn.h> 8 9 #include <rte_bus_pci.h> 10 #include <rte_ethdev_pci.h> 11 #include <rte_kvargs.h> 12 13 #include "rte_pmd_ark.h" 14 #include "ark_global.h" 15 #include "ark_logs.h" 16 #include "ark_ethdev_tx.h" 17 #include "ark_ethdev_rx.h" 18 #include "ark_mpu.h" 19 #include "ark_ddm.h" 20 #include "ark_udm.h" 21 #include "ark_rqp.h" 22 #include "ark_pktdir.h" 23 #include "ark_pktgen.h" 24 #include "ark_pktchkr.h" 25 26 /* Internal prototypes */ 27 static int eth_ark_check_args(struct ark_adapter *ark, const char *params); 28 static int eth_ark_dev_init(struct rte_eth_dev *dev); 29 static int ark_config_device(struct rte_eth_dev *dev); 30 static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev); 31 static int eth_ark_dev_configure(struct rte_eth_dev *dev); 32 static int eth_ark_dev_start(struct rte_eth_dev *dev); 33 static int eth_ark_dev_stop(struct rte_eth_dev *dev); 34 static int eth_ark_dev_close(struct rte_eth_dev *dev); 35 static int eth_ark_dev_info_get(struct rte_eth_dev *dev, 36 struct rte_eth_dev_info *dev_info); 37 static int eth_ark_dev_link_update(struct rte_eth_dev *dev, 38 int wait_to_complete); 39 static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev); 40 static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev); 41 static int eth_ark_dev_stats_get(struct rte_eth_dev *dev, 42 struct rte_eth_stats *stats); 43 static int eth_ark_dev_stats_reset(struct rte_eth_dev *dev); 44 static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, 45 struct rte_ether_addr *mac_addr); 46 static int eth_ark_macaddr_add(struct rte_eth_dev *dev, 47 struct rte_ether_addr *mac_addr, 48 uint32_t index, 49 uint32_t pool); 50 static void eth_ark_macaddr_remove(struct rte_eth_dev *dev, 51 uint32_t index); 52 static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size); 53 54 /* 55 * The packet generator is a functional block used to generate packet 56 * patterns for testing. It is not intended for nominal use. 57 */ 58 #define ARK_PKTGEN_ARG "Pkt_gen" 59 60 /* 61 * The packet checker is a functional block used to verify packet 62 * patterns for testing. It is not intended for nominal use. 63 */ 64 #define ARK_PKTCHKR_ARG "Pkt_chkr" 65 66 /* 67 * The packet director is used to select the internal ingress and 68 * egress packets paths during testing. It is not intended for 69 * nominal use. 70 */ 71 #define ARK_PKTDIR_ARG "Pkt_dir" 72 73 /* Devinfo configurations */ 74 #define ARK_RX_MAX_QUEUE (4096 * 4) 75 #define ARK_RX_MIN_QUEUE (512) 76 #define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128) 77 #define ARK_RX_MIN_BUFSIZE (1024) 78 79 #define ARK_TX_MAX_QUEUE (4096 * 4) 80 #define ARK_TX_MIN_QUEUE (256) 81 82 int rte_pmd_ark_rx_userdata_dynfield_offset = -1; 83 int rte_pmd_ark_tx_userdata_dynfield_offset = -1; 84 85 static const char * const valid_arguments[] = { 86 ARK_PKTGEN_ARG, 87 ARK_PKTCHKR_ARG, 88 ARK_PKTDIR_ARG, 89 NULL 90 }; 91 92 static const struct rte_pci_id pci_id_ark_map[] = { 93 {RTE_PCI_DEVICE(0x1d6c, 0x100d)}, 94 {RTE_PCI_DEVICE(0x1d6c, 0x100e)}, 95 {.vendor_id = 0, /* sentinel */ }, 96 }; 97 98 static int 99 eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 100 struct rte_pci_device *pci_dev) 101 { 102 struct rte_eth_dev *eth_dev; 103 int ret; 104 105 eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter)); 106 107 if (eth_dev == NULL) 108 return -ENOMEM; 109 110 ret = eth_ark_dev_init(eth_dev); 111 if (ret) 112 rte_eth_dev_release_port(eth_dev); 113 114 return ret; 115 } 116 117 static int 118 eth_ark_pci_remove(struct rte_pci_device *pci_dev) 119 { 120 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit); 121 } 122 123 static struct rte_pci_driver rte_ark_pmd = { 124 .id_table = pci_id_ark_map, 125 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 126 .probe = eth_ark_pci_probe, 127 .remove = eth_ark_pci_remove, 128 }; 129 130 static const struct eth_dev_ops ark_eth_dev_ops = { 131 .dev_configure = eth_ark_dev_configure, 132 .dev_start = eth_ark_dev_start, 133 .dev_stop = eth_ark_dev_stop, 134 .dev_close = eth_ark_dev_close, 135 136 .dev_infos_get = eth_ark_dev_info_get, 137 138 .rx_queue_setup = eth_ark_dev_rx_queue_setup, 139 .tx_queue_setup = eth_ark_tx_queue_setup, 140 141 .link_update = eth_ark_dev_link_update, 142 .dev_set_link_up = eth_ark_dev_set_link_up, 143 .dev_set_link_down = eth_ark_dev_set_link_down, 144 145 .rx_queue_start = eth_ark_rx_start_queue, 146 .rx_queue_stop = eth_ark_rx_stop_queue, 147 148 .tx_queue_start = eth_ark_tx_queue_start, 149 .tx_queue_stop = eth_ark_tx_queue_stop, 150 151 .stats_get = eth_ark_dev_stats_get, 152 .stats_reset = eth_ark_dev_stats_reset, 153 154 .mac_addr_add = eth_ark_macaddr_add, 155 .mac_addr_remove = eth_ark_macaddr_remove, 156 .mac_addr_set = eth_ark_set_default_mac_addr, 157 158 .mtu_set = eth_ark_set_mtu, 159 }; 160 161 static int 162 check_for_ext(struct ark_adapter *ark) 163 { 164 int found = 0; 165 166 /* Get the env */ 167 const char *dllpath = getenv("ARK_EXT_PATH"); 168 169 if (dllpath == NULL) { 170 ARK_PMD_LOG(DEBUG, "EXT NO dll path specified\n"); 171 return 0; 172 } 173 ARK_PMD_LOG(NOTICE, "EXT found dll path at %s\n", dllpath); 174 175 /* Open and load the .so */ 176 ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY); 177 if (ark->d_handle == NULL) { 178 ARK_PMD_LOG(ERR, "Could not load user extension %s\n", 179 dllpath); 180 return -1; 181 } 182 ARK_PMD_LOG(DEBUG, "SUCCESS: loaded user extension %s\n", 183 dllpath); 184 185 /* Get the entry points */ 186 ark->user_ext.dev_init = 187 (void *(*)(struct rte_eth_dev *, void *, int)) 188 dlsym(ark->d_handle, "dev_init"); 189 ARK_PMD_LOG(DEBUG, "device ext init pointer = %p\n", 190 ark->user_ext.dev_init); 191 ark->user_ext.dev_get_port_count = 192 (int (*)(struct rte_eth_dev *, void *)) 193 dlsym(ark->d_handle, "dev_get_port_count"); 194 ark->user_ext.dev_uninit = 195 (void (*)(struct rte_eth_dev *, void *)) 196 dlsym(ark->d_handle, "dev_uninit"); 197 ark->user_ext.dev_configure = 198 (int (*)(struct rte_eth_dev *, void *)) 199 dlsym(ark->d_handle, "dev_configure"); 200 ark->user_ext.dev_start = 201 (int (*)(struct rte_eth_dev *, void *)) 202 dlsym(ark->d_handle, "dev_start"); 203 ark->user_ext.dev_stop = 204 (void (*)(struct rte_eth_dev *, void *)) 205 dlsym(ark->d_handle, "dev_stop"); 206 ark->user_ext.dev_close = 207 (void (*)(struct rte_eth_dev *, void *)) 208 dlsym(ark->d_handle, "dev_close"); 209 ark->user_ext.link_update = 210 (int (*)(struct rte_eth_dev *, int, void *)) 211 dlsym(ark->d_handle, "link_update"); 212 ark->user_ext.dev_set_link_up = 213 (int (*)(struct rte_eth_dev *, void *)) 214 dlsym(ark->d_handle, "dev_set_link_up"); 215 ark->user_ext.dev_set_link_down = 216 (int (*)(struct rte_eth_dev *, void *)) 217 dlsym(ark->d_handle, "dev_set_link_down"); 218 ark->user_ext.stats_get = 219 (int (*)(struct rte_eth_dev *, struct rte_eth_stats *, 220 void *)) 221 dlsym(ark->d_handle, "stats_get"); 222 ark->user_ext.stats_reset = 223 (void (*)(struct rte_eth_dev *, void *)) 224 dlsym(ark->d_handle, "stats_reset"); 225 ark->user_ext.mac_addr_add = 226 (void (*)(struct rte_eth_dev *, struct rte_ether_addr *, 227 uint32_t, uint32_t, void *)) 228 dlsym(ark->d_handle, "mac_addr_add"); 229 ark->user_ext.mac_addr_remove = 230 (void (*)(struct rte_eth_dev *, uint32_t, void *)) 231 dlsym(ark->d_handle, "mac_addr_remove"); 232 ark->user_ext.mac_addr_set = 233 (void (*)(struct rte_eth_dev *, struct rte_ether_addr *, 234 void *)) 235 dlsym(ark->d_handle, "mac_addr_set"); 236 ark->user_ext.set_mtu = 237 (int (*)(struct rte_eth_dev *, uint16_t, 238 void *)) 239 dlsym(ark->d_handle, "set_mtu"); 240 241 return found; 242 } 243 244 static int 245 eth_ark_dev_init(struct rte_eth_dev *dev) 246 { 247 struct ark_adapter *ark = dev->data->dev_private; 248 struct rte_pci_device *pci_dev; 249 int ret; 250 int port_count = 1; 251 int p; 252 static const struct rte_mbuf_dynfield ark_tx_userdata_dynfield_desc = { 253 .name = RTE_PMD_ARK_TX_USERDATA_DYNFIELD_NAME, 254 .size = sizeof(rte_pmd_ark_tx_userdata_t), 255 .align = __alignof__(rte_pmd_ark_tx_userdata_t), 256 }; 257 static const struct rte_mbuf_dynfield ark_rx_userdata_dynfield_desc = { 258 .name = RTE_PMD_ARK_RX_USERDATA_DYNFIELD_NAME, 259 .size = sizeof(rte_pmd_ark_rx_userdata_t), 260 .align = __alignof__(rte_pmd_ark_rx_userdata_t), 261 }; 262 263 ark->eth_dev = dev; 264 265 ARK_PMD_LOG(DEBUG, "\n"); 266 267 /* Check to see if there is an extension that we need to load */ 268 ret = check_for_ext(ark); 269 if (ret) 270 return ret; 271 272 /* Extra mbuf fields for user data */ 273 if (RTE_PMD_ARK_TX_USERDATA_ENABLE) { 274 rte_pmd_ark_tx_userdata_dynfield_offset = 275 rte_mbuf_dynfield_register(&ark_tx_userdata_dynfield_desc); 276 if (rte_pmd_ark_tx_userdata_dynfield_offset < 0) { 277 ARK_PMD_LOG(ERR, 278 "Failed to register mbuf field for tx userdata\n"); 279 return -rte_errno; 280 } 281 ARK_PMD_LOG(INFO, "Registered TX-meta dynamic field at %d\n", 282 rte_pmd_ark_tx_userdata_dynfield_offset); 283 } 284 if (RTE_PMD_ARK_RX_USERDATA_ENABLE) { 285 rte_pmd_ark_rx_userdata_dynfield_offset = 286 rte_mbuf_dynfield_register(&ark_rx_userdata_dynfield_desc); 287 if (rte_pmd_ark_rx_userdata_dynfield_offset < 0) { 288 ARK_PMD_LOG(ERR, 289 "Failed to register mbuf field for rx userdata\n"); 290 return -rte_errno; 291 } 292 ARK_PMD_LOG(INFO, "Registered RX-meta dynamic field at %d\n", 293 rte_pmd_ark_rx_userdata_dynfield_offset); 294 } 295 296 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 297 rte_eth_copy_pci_info(dev, pci_dev); 298 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 299 300 /* Use dummy function until setup */ 301 dev->rx_pkt_burst = ð_ark_recv_pkts_noop; 302 dev->tx_pkt_burst = ð_ark_xmit_pkts_noop; 303 304 ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr; 305 ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr; 306 307 ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE]; 308 ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE]; 309 ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE]; 310 ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE]; 311 ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE]; 312 ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE]; 313 ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE]; 314 ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE]; 315 ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE]; 316 ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE]; 317 318 ark->rqpacing = 319 (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE); 320 ark->started = 0; 321 322 ARK_PMD_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n", 323 ark->sysctrl.t32[4], 324 rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4])); 325 ARK_PMD_LOG(NOTICE, "Arkville HW Commit_ID: %08x\n", 326 rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4])); 327 328 /* If HW sanity test fails, return an error */ 329 if (ark->sysctrl.t32[4] != 0xcafef00d) { 330 ARK_PMD_LOG(ERR, 331 "HW Sanity test has failed, expected constant" 332 " 0x%x, read 0x%x (%s)\n", 333 0xcafef00d, 334 ark->sysctrl.t32[4], __func__); 335 return -1; 336 } 337 if (ark->sysctrl.t32[3] != 0) { 338 if (ark_rqp_lasped(ark->rqpacing)) { 339 ARK_PMD_LOG(ERR, "Arkville Evaluation System - " 340 "Timer has Expired\n"); 341 return -1; 342 } 343 ARK_PMD_LOG(WARNING, "Arkville Evaluation System - " 344 "Timer is Running\n"); 345 } 346 347 ARK_PMD_LOG(DEBUG, 348 "HW Sanity test has PASSED, expected constant" 349 " 0x%x, read 0x%x (%s)\n", 350 0xcafef00d, ark->sysctrl.t32[4], __func__); 351 352 /* We are a single function multi-port device. */ 353 ret = ark_config_device(dev); 354 if (ret) 355 return -1; 356 357 dev->dev_ops = &ark_eth_dev_ops; 358 dev->rx_queue_count = eth_ark_dev_rx_queue_count; 359 360 dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0); 361 if (!dev->data->mac_addrs) { 362 ARK_PMD_LOG(ERR, 363 "Failed to allocated memory for storing mac address" 364 ); 365 } 366 367 if (ark->user_ext.dev_init) { 368 ark->user_data[dev->data->port_id] = 369 ark->user_ext.dev_init(dev, ark->a_bar, 0); 370 if (!ark->user_data[dev->data->port_id]) { 371 ARK_PMD_LOG(WARNING, 372 "Failed to initialize PMD extension!" 373 " continuing without it\n"); 374 memset(&ark->user_ext, 0, sizeof(struct ark_user_ext)); 375 dlclose(ark->d_handle); 376 } 377 } 378 379 if (pci_dev->device.devargs) 380 ret = eth_ark_check_args(ark, pci_dev->device.devargs->args); 381 else 382 ARK_PMD_LOG(INFO, "No Device args found\n"); 383 384 if (ret) 385 goto error; 386 /* 387 * We will create additional devices based on the number of requested 388 * ports 389 */ 390 if (ark->user_ext.dev_get_port_count) 391 port_count = 392 ark->user_ext.dev_get_port_count(dev, 393 ark->user_data[dev->data->port_id]); 394 ark->num_ports = port_count; 395 396 for (p = 0; p < port_count; p++) { 397 struct rte_eth_dev *eth_dev; 398 char name[RTE_ETH_NAME_MAX_LEN]; 399 400 snprintf(name, sizeof(name), "arketh%d", 401 dev->data->port_id + p); 402 403 if (p == 0) { 404 /* First port is already allocated by DPDK */ 405 eth_dev = ark->eth_dev; 406 rte_eth_dev_probing_finish(eth_dev); 407 continue; 408 } 409 410 /* reserve an ethdev entry */ 411 eth_dev = rte_eth_dev_allocate(name); 412 if (!eth_dev) { 413 ARK_PMD_LOG(ERR, 414 "Could not allocate eth_dev for port %d\n", 415 p); 416 goto error; 417 } 418 419 eth_dev->device = &pci_dev->device; 420 eth_dev->data->dev_private = ark; 421 eth_dev->dev_ops = ark->eth_dev->dev_ops; 422 eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst; 423 eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst; 424 425 rte_eth_copy_pci_info(eth_dev, pci_dev); 426 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 427 428 eth_dev->data->mac_addrs = rte_zmalloc(name, 429 RTE_ETHER_ADDR_LEN, 0); 430 if (!eth_dev->data->mac_addrs) { 431 ARK_PMD_LOG(ERR, 432 "Memory allocation for MAC failed!" 433 " Exiting.\n"); 434 goto error; 435 } 436 437 if (ark->user_ext.dev_init) { 438 ark->user_data[eth_dev->data->port_id] = 439 ark->user_ext.dev_init(dev, ark->a_bar, p); 440 } 441 442 rte_eth_dev_probing_finish(eth_dev); 443 } 444 445 return ret; 446 447 error: 448 rte_free(dev->data->mac_addrs); 449 dev->data->mac_addrs = NULL; 450 return -1; 451 } 452 453 /* 454 *Initial device configuration when device is opened 455 * setup the DDM, and UDM 456 * Called once per PCIE device 457 */ 458 static int 459 ark_config_device(struct rte_eth_dev *dev) 460 { 461 struct ark_adapter *ark = dev->data->dev_private; 462 uint16_t num_q, i; 463 struct ark_mpu_t *mpu; 464 465 /* 466 * Make sure that the packet director, generator and checker are in a 467 * known state 468 */ 469 ark->start_pg = 0; 470 ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1); 471 if (ark->pg == NULL) 472 return -1; 473 ark_pktgen_reset(ark->pg); 474 ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1); 475 if (ark->pc == NULL) 476 return -1; 477 ark_pktchkr_stop(ark->pc); 478 ark->pd = ark_pktdir_init(ark->pktdir.v); 479 if (ark->pd == NULL) 480 return -1; 481 482 /* Verify HW */ 483 if (ark_udm_verify(ark->udm.v)) 484 return -1; 485 if (ark_ddm_verify(ark->ddm.v)) 486 return -1; 487 488 /* UDM */ 489 if (ark_udm_reset(ark->udm.v)) { 490 ARK_PMD_LOG(ERR, "Unable to stop and reset UDM\n"); 491 return -1; 492 } 493 /* Keep in reset until the MPU are cleared */ 494 495 /* MPU reset */ 496 mpu = ark->mpurx.v; 497 num_q = ark_api_num_queues(mpu); 498 ark->rx_queues = num_q; 499 for (i = 0; i < num_q; i++) { 500 ark_mpu_reset(mpu); 501 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 502 } 503 504 ark_udm_stop(ark->udm.v, 0); 505 ark_udm_configure(ark->udm.v, 506 RTE_PKTMBUF_HEADROOM, 507 RTE_MBUF_DEFAULT_DATAROOM, 508 ARK_RX_WRITE_TIME_NS); 509 ark_udm_stats_reset(ark->udm.v); 510 ark_udm_stop(ark->udm.v, 0); 511 512 /* TX -- DDM */ 513 if (ark_ddm_stop(ark->ddm.v, 1)) 514 ARK_PMD_LOG(ERR, "Unable to stop DDM\n"); 515 516 mpu = ark->mputx.v; 517 num_q = ark_api_num_queues(mpu); 518 ark->tx_queues = num_q; 519 for (i = 0; i < num_q; i++) { 520 ark_mpu_reset(mpu); 521 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 522 } 523 524 ark_ddm_reset(ark->ddm.v); 525 ark_ddm_stats_reset(ark->ddm.v); 526 527 ark_ddm_stop(ark->ddm.v, 0); 528 ark_rqp_stats_reset(ark->rqpacing); 529 530 return 0; 531 } 532 533 static int 534 eth_ark_dev_uninit(struct rte_eth_dev *dev) 535 { 536 struct ark_adapter *ark = dev->data->dev_private; 537 538 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 539 return 0; 540 541 if (ark->user_ext.dev_uninit) 542 ark->user_ext.dev_uninit(dev, 543 ark->user_data[dev->data->port_id]); 544 545 ark_pktgen_uninit(ark->pg); 546 ark_pktchkr_uninit(ark->pc); 547 548 return 0; 549 } 550 551 static int 552 eth_ark_dev_configure(struct rte_eth_dev *dev) 553 { 554 struct ark_adapter *ark = dev->data->dev_private; 555 556 eth_ark_dev_set_link_up(dev); 557 if (ark->user_ext.dev_configure) 558 return ark->user_ext.dev_configure(dev, 559 ark->user_data[dev->data->port_id]); 560 return 0; 561 } 562 563 static void * 564 delay_pg_start(void *arg) 565 { 566 struct ark_adapter *ark = (struct ark_adapter *)arg; 567 568 /* This function is used exclusively for regression testing, We 569 * perform a blind sleep here to ensure that the external test 570 * application has time to setup the test before we generate packets 571 */ 572 usleep(100000); 573 ark_pktgen_run(ark->pg); 574 return NULL; 575 } 576 577 static int 578 eth_ark_dev_start(struct rte_eth_dev *dev) 579 { 580 struct ark_adapter *ark = dev->data->dev_private; 581 int i; 582 583 /* RX Side */ 584 /* start UDM */ 585 ark_udm_start(ark->udm.v); 586 587 for (i = 0; i < dev->data->nb_rx_queues; i++) 588 eth_ark_rx_start_queue(dev, i); 589 590 /* TX Side */ 591 for (i = 0; i < dev->data->nb_tx_queues; i++) 592 eth_ark_tx_queue_start(dev, i); 593 594 /* start DDM */ 595 ark_ddm_start(ark->ddm.v); 596 597 ark->started = 1; 598 /* set xmit and receive function */ 599 dev->rx_pkt_burst = ð_ark_recv_pkts; 600 dev->tx_pkt_burst = ð_ark_xmit_pkts; 601 602 if (ark->start_pg) 603 ark_pktchkr_run(ark->pc); 604 605 if (ark->start_pg && (dev->data->port_id == 0)) { 606 pthread_t thread; 607 608 /* Delay packet generatpr start allow the hardware to be ready 609 * This is only used for sanity checking with internal generator 610 */ 611 if (pthread_create(&thread, NULL, delay_pg_start, ark)) { 612 ARK_PMD_LOG(ERR, "Could not create pktgen " 613 "starter thread\n"); 614 return -1; 615 } 616 } 617 618 if (ark->user_ext.dev_start) 619 ark->user_ext.dev_start(dev, 620 ark->user_data[dev->data->port_id]); 621 622 return 0; 623 } 624 625 static int 626 eth_ark_dev_stop(struct rte_eth_dev *dev) 627 { 628 uint16_t i; 629 int status; 630 struct ark_adapter *ark = dev->data->dev_private; 631 struct ark_mpu_t *mpu; 632 633 if (ark->started == 0) 634 return 0; 635 ark->started = 0; 636 dev->data->dev_started = 0; 637 638 /* Stop the extension first */ 639 if (ark->user_ext.dev_stop) 640 ark->user_ext.dev_stop(dev, 641 ark->user_data[dev->data->port_id]); 642 643 /* Stop the packet generator */ 644 if (ark->start_pg) 645 ark_pktgen_pause(ark->pg); 646 647 dev->rx_pkt_burst = ð_ark_recv_pkts_noop; 648 dev->tx_pkt_burst = ð_ark_xmit_pkts_noop; 649 650 /* STOP TX Side */ 651 for (i = 0; i < dev->data->nb_tx_queues; i++) { 652 status = eth_ark_tx_queue_stop(dev, i); 653 if (status != 0) { 654 uint16_t port = dev->data->port_id; 655 ARK_PMD_LOG(ERR, 656 "tx_queue stop anomaly" 657 " port %u, queue %u\n", 658 port, i); 659 } 660 } 661 662 /* Stop DDM */ 663 /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */ 664 for (i = 0; i < 10; i++) { 665 status = ark_ddm_stop(ark->ddm.v, 1); 666 if (status == 0) 667 break; 668 } 669 if (status || i != 0) { 670 ARK_PMD_LOG(ERR, "DDM stop anomaly. status:" 671 " %d iter: %u. (%s)\n", 672 status, 673 i, 674 __func__); 675 ark_ddm_dump(ark->ddm.v, "Stop anomaly"); 676 677 mpu = ark->mputx.v; 678 for (i = 0; i < ark->tx_queues; i++) { 679 ark_mpu_dump(mpu, "DDM failure dump", i); 680 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 681 } 682 } 683 684 /* STOP RX Side */ 685 /* Stop UDM multiple tries attempted */ 686 for (i = 0; i < 10; i++) { 687 status = ark_udm_stop(ark->udm.v, 1); 688 if (status == 0) 689 break; 690 } 691 if (status || i != 0) { 692 ARK_PMD_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n", 693 status, i, __func__); 694 ark_udm_dump(ark->udm.v, "Stop anomaly"); 695 696 mpu = ark->mpurx.v; 697 for (i = 0; i < ark->rx_queues; i++) { 698 ark_mpu_dump(mpu, "UDM Stop anomaly", i); 699 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 700 } 701 } 702 703 ark_udm_dump_stats(ark->udm.v, "Post stop"); 704 ark_udm_dump_perf(ark->udm.v, "Post stop"); 705 706 for (i = 0; i < dev->data->nb_rx_queues; i++) 707 eth_ark_rx_dump_queue(dev, i, __func__); 708 709 /* Stop the packet checker if it is running */ 710 if (ark->start_pg) { 711 ark_pktchkr_dump_stats(ark->pc); 712 ark_pktchkr_stop(ark->pc); 713 } 714 715 return 0; 716 } 717 718 static int 719 eth_ark_dev_close(struct rte_eth_dev *dev) 720 { 721 struct ark_adapter *ark = dev->data->dev_private; 722 uint16_t i; 723 724 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 725 return 0; 726 727 if (ark->user_ext.dev_close) 728 ark->user_ext.dev_close(dev, 729 ark->user_data[dev->data->port_id]); 730 731 eth_ark_dev_stop(dev); 732 eth_ark_udm_force_close(dev); 733 734 /* 735 * TODO This should only be called once for the device during shutdown 736 */ 737 ark_rqp_dump(ark->rqpacing); 738 739 for (i = 0; i < dev->data->nb_tx_queues; i++) { 740 eth_ark_tx_queue_release(dev->data->tx_queues[i]); 741 dev->data->tx_queues[i] = 0; 742 } 743 744 for (i = 0; i < dev->data->nb_rx_queues; i++) { 745 eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]); 746 dev->data->rx_queues[i] = 0; 747 } 748 749 return 0; 750 } 751 752 static int 753 eth_ark_dev_info_get(struct rte_eth_dev *dev, 754 struct rte_eth_dev_info *dev_info) 755 { 756 struct ark_adapter *ark = dev->data->dev_private; 757 struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE); 758 struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE); 759 uint16_t ports = ark->num_ports; 760 761 dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN; 762 dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE; 763 764 dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports); 765 dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports); 766 767 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 768 .nb_max = ARK_RX_MAX_QUEUE, 769 .nb_min = ARK_RX_MIN_QUEUE, 770 .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */ 771 772 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 773 .nb_max = ARK_TX_MAX_QUEUE, 774 .nb_min = ARK_TX_MIN_QUEUE, 775 .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */ 776 777 /* ARK PMD supports all line rates, how do we indicate that here ?? */ 778 dev_info->speed_capa = (ETH_LINK_SPEED_1G | 779 ETH_LINK_SPEED_10G | 780 ETH_LINK_SPEED_25G | 781 ETH_LINK_SPEED_40G | 782 ETH_LINK_SPEED_50G | 783 ETH_LINK_SPEED_100G); 784 785 return 0; 786 } 787 788 static int 789 eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 790 { 791 ARK_PMD_LOG(DEBUG, "link status = %d\n", 792 dev->data->dev_link.link_status); 793 struct ark_adapter *ark = dev->data->dev_private; 794 795 if (ark->user_ext.link_update) { 796 return ark->user_ext.link_update 797 (dev, wait_to_complete, 798 ark->user_data[dev->data->port_id]); 799 } 800 return 0; 801 } 802 803 static int 804 eth_ark_dev_set_link_up(struct rte_eth_dev *dev) 805 { 806 dev->data->dev_link.link_status = 1; 807 struct ark_adapter *ark = dev->data->dev_private; 808 809 if (ark->user_ext.dev_set_link_up) 810 return ark->user_ext.dev_set_link_up(dev, 811 ark->user_data[dev->data->port_id]); 812 return 0; 813 } 814 815 static int 816 eth_ark_dev_set_link_down(struct rte_eth_dev *dev) 817 { 818 dev->data->dev_link.link_status = 0; 819 struct ark_adapter *ark = dev->data->dev_private; 820 821 if (ark->user_ext.dev_set_link_down) 822 return ark->user_ext.dev_set_link_down(dev, 823 ark->user_data[dev->data->port_id]); 824 return 0; 825 } 826 827 static int 828 eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 829 { 830 uint16_t i; 831 struct ark_adapter *ark = dev->data->dev_private; 832 833 stats->ipackets = 0; 834 stats->ibytes = 0; 835 stats->opackets = 0; 836 stats->obytes = 0; 837 stats->imissed = 0; 838 stats->oerrors = 0; 839 840 for (i = 0; i < dev->data->nb_tx_queues; i++) 841 eth_tx_queue_stats_get(dev->data->tx_queues[i], stats); 842 for (i = 0; i < dev->data->nb_rx_queues; i++) 843 eth_rx_queue_stats_get(dev->data->rx_queues[i], stats); 844 if (ark->user_ext.stats_get) 845 return ark->user_ext.stats_get(dev, stats, 846 ark->user_data[dev->data->port_id]); 847 return 0; 848 } 849 850 static int 851 eth_ark_dev_stats_reset(struct rte_eth_dev *dev) 852 { 853 uint16_t i; 854 struct ark_adapter *ark = dev->data->dev_private; 855 856 for (i = 0; i < dev->data->nb_tx_queues; i++) 857 eth_tx_queue_stats_reset(dev->data->tx_queues[i]); 858 for (i = 0; i < dev->data->nb_rx_queues; i++) 859 eth_rx_queue_stats_reset(dev->data->rx_queues[i]); 860 if (ark->user_ext.stats_reset) 861 ark->user_ext.stats_reset(dev, 862 ark->user_data[dev->data->port_id]); 863 864 return 0; 865 } 866 867 static int 868 eth_ark_macaddr_add(struct rte_eth_dev *dev, 869 struct rte_ether_addr *mac_addr, 870 uint32_t index, 871 uint32_t pool) 872 { 873 struct ark_adapter *ark = dev->data->dev_private; 874 875 if (ark->user_ext.mac_addr_add) { 876 ark->user_ext.mac_addr_add(dev, 877 mac_addr, 878 index, 879 pool, 880 ark->user_data[dev->data->port_id]); 881 return 0; 882 } 883 return -ENOTSUP; 884 } 885 886 static void 887 eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) 888 { 889 struct ark_adapter *ark = dev->data->dev_private; 890 891 if (ark->user_ext.mac_addr_remove) 892 ark->user_ext.mac_addr_remove(dev, index, 893 ark->user_data[dev->data->port_id]); 894 } 895 896 static int 897 eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, 898 struct rte_ether_addr *mac_addr) 899 { 900 struct ark_adapter *ark = dev->data->dev_private; 901 902 if (ark->user_ext.mac_addr_set) { 903 ark->user_ext.mac_addr_set(dev, mac_addr, 904 ark->user_data[dev->data->port_id]); 905 return 0; 906 } 907 return -ENOTSUP; 908 } 909 910 static int 911 eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size) 912 { 913 struct ark_adapter *ark = dev->data->dev_private; 914 915 if (ark->user_ext.set_mtu) 916 return ark->user_ext.set_mtu(dev, size, 917 ark->user_data[dev->data->port_id]); 918 919 return -ENOTSUP; 920 } 921 922 static inline int 923 process_pktdir_arg(const char *key, const char *value, 924 void *extra_args) 925 { 926 ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n", 927 key, value); 928 struct ark_adapter *ark = 929 (struct ark_adapter *)extra_args; 930 931 ark->pkt_dir_v = strtol(value, NULL, 16); 932 ARK_PMD_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v); 933 return 0; 934 } 935 936 static inline int 937 process_file_args(const char *key, const char *value, void *extra_args) 938 { 939 ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n", 940 key, value); 941 char *args = (char *)extra_args; 942 943 /* Open the configuration file */ 944 FILE *file = fopen(value, "r"); 945 char line[ARK_MAX_ARG_LEN]; 946 int size = 0; 947 int first = 1; 948 949 if (file == NULL) { 950 ARK_PMD_LOG(ERR, "Unable to open " 951 "config file %s\n", value); 952 return -1; 953 } 954 955 while (fgets(line, sizeof(line), file)) { 956 size += strlen(line); 957 if (size >= ARK_MAX_ARG_LEN) { 958 ARK_PMD_LOG(ERR, "Unable to parse file %s args, " 959 "parameter list is too long\n", value); 960 fclose(file); 961 return -1; 962 } 963 if (first) { 964 strncpy(args, line, ARK_MAX_ARG_LEN); 965 first = 0; 966 } else { 967 strncat(args, line, ARK_MAX_ARG_LEN); 968 } 969 } 970 ARK_PMD_LOG(DEBUG, "file = %s\n", args); 971 fclose(file); 972 return 0; 973 } 974 975 static int 976 eth_ark_check_args(struct ark_adapter *ark, const char *params) 977 { 978 struct rte_kvargs *kvlist; 979 unsigned int k_idx; 980 struct rte_kvargs_pair *pair = NULL; 981 int ret = -1; 982 983 kvlist = rte_kvargs_parse(params, valid_arguments); 984 if (kvlist == NULL) 985 return 0; 986 987 ark->pkt_gen_args[0] = 0; 988 ark->pkt_chkr_args[0] = 0; 989 990 for (k_idx = 0; k_idx < kvlist->count; k_idx++) { 991 pair = &kvlist->pairs[k_idx]; 992 ARK_PMD_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n", 993 pair->key, 994 pair->value); 995 } 996 997 if (rte_kvargs_process(kvlist, 998 ARK_PKTDIR_ARG, 999 &process_pktdir_arg, 1000 ark) != 0) { 1001 ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG); 1002 goto free_kvlist; 1003 } 1004 1005 if (rte_kvargs_process(kvlist, 1006 ARK_PKTGEN_ARG, 1007 &process_file_args, 1008 ark->pkt_gen_args) != 0) { 1009 ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG); 1010 goto free_kvlist; 1011 } 1012 1013 if (rte_kvargs_process(kvlist, 1014 ARK_PKTCHKR_ARG, 1015 &process_file_args, 1016 ark->pkt_chkr_args) != 0) { 1017 ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG); 1018 goto free_kvlist; 1019 } 1020 1021 ARK_PMD_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v); 1022 /* Setup the packet director */ 1023 ark_pktdir_setup(ark->pd, ark->pkt_dir_v); 1024 1025 /* Setup the packet generator */ 1026 if (ark->pkt_gen_args[0]) { 1027 ARK_PMD_LOG(DEBUG, "Setting up the packet generator\n"); 1028 ark_pktgen_parse(ark->pkt_gen_args); 1029 ark_pktgen_reset(ark->pg); 1030 ark_pktgen_setup(ark->pg); 1031 ark->start_pg = 1; 1032 } 1033 1034 /* Setup the packet checker */ 1035 if (ark->pkt_chkr_args[0]) { 1036 ark_pktchkr_parse(ark->pkt_chkr_args); 1037 ark_pktchkr_setup(ark->pc); 1038 } 1039 1040 ret = 0; 1041 1042 free_kvlist: 1043 rte_kvargs_free(kvlist); 1044 1045 return ret; 1046 } 1047 1048 RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd); 1049 RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic "); 1050 RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map); 1051 RTE_PMD_REGISTER_PARAM_STRING(net_ark, 1052 ARK_PKTGEN_ARG "=<filename> " 1053 ARK_PKTCHKR_ARG "=<filename> " 1054 ARK_PKTDIR_ARG "=<bitmap>"); 1055 RTE_LOG_REGISTER(ark_logtype, pmd.net.ark, NOTICE); 1056