/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Atomic Rules LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <sys/stat.h>
#include <dlfcn.h>

#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>

#include "ark_global.h"
#include "ark_logs.h"
#include "ark_ethdev.h"
#include "ark_ethdev_tx.h"
#include "ark_ethdev_rx.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_udm.h"
#include "ark_rqp.h"
#include "ark_pktdir.h"
#include "ark_pktgen.h"
#include "ark_pktchkr.h"

/* Internal prototypes */
static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
static int eth_ark_dev_init(struct rte_eth_dev *dev);
static int ark_config_device(struct rte_eth_dev *dev);
static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
static int eth_ark_dev_configure(struct rte_eth_dev *dev);
static int eth_ark_dev_start(struct rte_eth_dev *dev);
static void eth_ark_dev_stop(struct rte_eth_dev *dev);
static void eth_ark_dev_close(struct rte_eth_dev *dev);
static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_ark_dev_stats_get(struct rte_eth_dev *dev,
				  struct rte_eth_stats *stats);
static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
					 struct ether_addr *mac_addr);
static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
			       struct ether_addr *mac_addr,
			       uint32_t index,
			       uint32_t pool);
static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
				   uint32_t index);
static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);

/*
 * The packet generator is a functional block used to
 * generate packet patterns for testing. It is not intended for nominal use.
 */
#define ARK_PKTGEN_ARG "Pkt_gen"

/*
 * The packet checker is a functional block used to verify packet
 * patterns for testing. It is not intended for nominal use.
 */
#define ARK_PKTCHKR_ARG "Pkt_chkr"

/*
 * The packet director is used to select the internal ingress and
 * egress packets paths during testing. It is not intended for
 * nominal use.
 */
#define ARK_PKTDIR_ARG "Pkt_dir"

/* Devinfo configurations */
#define ARK_RX_MAX_QUEUE (4096 * 4)
#define ARK_RX_MIN_QUEUE (512)
#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
#define ARK_RX_MIN_BUFSIZE (1024)

#define ARK_TX_MAX_QUEUE (4096 * 4)
#define ARK_TX_MIN_QUEUE (256)

static const char * const valid_arguments[] = {
	ARK_PKTGEN_ARG,
	ARK_PKTCHKR_ARG,
	ARK_PKTDIR_ARG,
	NULL
};

static const struct rte_pci_id pci_id_ark_map[] = {
	{RTE_PCI_DEVICE(0x1d6c, 0x100d)},
	{RTE_PCI_DEVICE(0x1d6c, 0x100e)},
	{.vendor_id = 0, /* sentinel */ },
};

static int
eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));

	if (eth_dev == NULL)
		return -ENOMEM;

	ret = eth_ark_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_pci_release(eth_dev);

	return ret;
}

static int
eth_ark_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
}

static struct rte_pci_driver rte_ark_pmd = {
	.id_table = pci_id_ark_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ark_pci_probe,
	.remove = eth_ark_pci_remove,
};

static const struct eth_dev_ops ark_eth_dev_ops = {
	.dev_configure = eth_ark_dev_configure,
	.dev_start = eth_ark_dev_start,
	.dev_stop = eth_ark_dev_stop,
	.dev_close = eth_ark_dev_close,

	.dev_infos_get = eth_ark_dev_info_get,

	.rx_queue_setup = eth_ark_dev_rx_queue_setup,
	.rx_queue_count = eth_ark_dev_rx_queue_count,
	.tx_queue_setup = eth_ark_tx_queue_setup,

	.link_update = eth_ark_dev_link_update,
	.dev_set_link_up = eth_ark_dev_set_link_up,
	.dev_set_link_down = eth_ark_dev_set_link_down,

	.rx_queue_start = eth_ark_rx_start_queue,
	.rx_queue_stop = eth_ark_rx_stop_queue,

	.tx_queue_start = eth_ark_tx_queue_start,
	.tx_queue_stop = eth_ark_tx_queue_stop,

	.stats_get = eth_ark_dev_stats_get,
	.stats_reset = eth_ark_dev_stats_reset,

	.mac_addr_add = eth_ark_macaddr_add,
	.mac_addr_remove = eth_ark_macaddr_remove,
	.mac_addr_set = eth_ark_set_default_mac_addr,

	.mtu_set = eth_ark_set_mtu,
};

static int
check_for_ext(struct ark_adapter *ark)
{
	int found = 0;

	/* Get the env */
	const char *dllpath = getenv("ARK_EXT_PATH");

	if (dllpath == NULL) {
		PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n");
		return 0;
	}
	PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath);

	/* Open and load the .so */
	ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
	if (ark->d_handle == NULL) {
		PMD_DRV_LOG(ERR, "Could not load user extension %s\n",
			    dllpath);
		return -1;
	}
	PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n",
		    dllpath);
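	/*
	 * Note: each entry point below is optional.  dlsym() returns NULL
	 * for any symbol the extension does not export, and every call site
	 * checks the pointer before invoking it.  The value returned by the
	 * extension's dev_init() is stored per port in ark->user_data[] and
	 * is passed back as the user_data argument of the other hooks.
	 */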

	/* Get the entry points */
	ark->user_ext.dev_init =
		(void *(*)(struct rte_eth_dev *, void *, int))
		dlsym(ark->d_handle, "dev_init");
	PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n",
		      ark->user_ext.dev_init);
	ark->user_ext.dev_get_port_count =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_get_port_count");
	ark->user_ext.dev_uninit =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_uninit");
	ark->user_ext.dev_configure =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_configure");
	ark->user_ext.dev_start =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_start");
	ark->user_ext.dev_stop =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_stop");
	ark->user_ext.dev_close =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_close");
	ark->user_ext.link_update =
		(int (*)(struct rte_eth_dev *, int, void *))
		dlsym(ark->d_handle, "link_update");
	ark->user_ext.dev_set_link_up =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_set_link_up");
	ark->user_ext.dev_set_link_down =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_set_link_down");
	ark->user_ext.stats_get =
		(void (*)(struct rte_eth_dev *, struct rte_eth_stats *,
			  void *))
		dlsym(ark->d_handle, "stats_get");
	ark->user_ext.stats_reset =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "stats_reset");
	ark->user_ext.mac_addr_add =
		(void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t,
			  uint32_t, void *))
		dlsym(ark->d_handle, "mac_addr_add");
	ark->user_ext.mac_addr_remove =
		(void (*)(struct rte_eth_dev *, uint32_t, void *))
		dlsym(ark->d_handle, "mac_addr_remove");
	ark->user_ext.mac_addr_set =
		(void (*)(struct rte_eth_dev *, struct ether_addr *,
			  void *))
		dlsym(ark->d_handle, "mac_addr_set");
	ark->user_ext.set_mtu =
		(int (*)(struct rte_eth_dev *, uint16_t,
			 void *))
		dlsym(ark->d_handle, "set_mtu");

	return found;
}

static int
eth_ark_dev_init(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	int ret;
	int port_count = 1;
	int p;

	ark->eth_dev = dev;

	PMD_FUNC_LOG(DEBUG, "\n");

	/* Check to see if there is an extension that we need to load */
	ret = check_for_ext(ark);
	if (ret)
		return ret;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	rte_eth_copy_pci_info(dev, pci_dev);

	/* Use dummy function until setup */
	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;

	ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
	ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;

	ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
	ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
	ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
	ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
	ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
	ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
	ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
	ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
	ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
	ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];

	ark->rqpacing =
		(struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
	ark->started = 0;

	PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
		      ark->sysctrl.t32[4],
		      rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
	PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n",
		    rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));

	/* If HW sanity test fails, return an error */
	if (ark->sysctrl.t32[4] != 0xcafef00d) {
		PMD_DRV_LOG(ERR,
			    "HW Sanity test has failed, expected constant"
			    " 0x%x, read 0x%x (%s)\n",
			    0xcafef00d,
			    ark->sysctrl.t32[4], __func__);
		return -1;
	}
	if (ark->sysctrl.t32[3] != 0) {
		if (ark_rqp_lasped(ark->rqpacing)) {
			PMD_DRV_LOG(ERR, "Arkville Evaluation System - "
				    "Timer has Expired\n");
			return -1;
		}
		PMD_DRV_LOG(WARNING, "Arkville Evaluation System - "
			    "Timer is Running\n");
	}

	PMD_DRV_LOG(INFO,
		    "HW Sanity test has PASSED, expected constant"
		    " 0x%x, read 0x%x (%s)\n",
		    0xcafef00d, ark->sysctrl.t32[4], __func__);

	/* We are a single function multi-port device. */
	ret = ark_config_device(dev);
	dev->dev_ops = &ark_eth_dev_ops;
	dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
	if (!dev->data->mac_addrs) {
		PMD_DRV_LOG(ERR,
			    "Failed to allocate memory for storing mac address"
			    );
	}

	if (ark->user_ext.dev_init) {
		ark->user_data[dev->data->port_id] =
			ark->user_ext.dev_init(dev, ark->a_bar, 0);
		if (!ark->user_data[dev->data->port_id]) {
			PMD_DRV_LOG(INFO,
				    "Failed to initialize PMD extension!"
				    " continuing without it\n");
			memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
			dlclose(ark->d_handle);
		}
	}

	if (pci_dev->device.devargs)
		ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
	else
		PMD_DRV_LOG(INFO, "No Device args found\n");

	if (ret)
		goto error;
	/*
	 * We will create additional devices based on the number of requested
	 * ports
	 */
	if (ark->user_ext.dev_get_port_count)
		port_count =
			ark->user_ext.dev_get_port_count(dev,
				ark->user_data[dev->data->port_id]);
	ark->num_ports = port_count;

	for (p = 0; p < port_count; p++) {
		struct rte_eth_dev *eth_dev;
		char name[RTE_ETH_NAME_MAX_LEN];

		snprintf(name, sizeof(name), "arketh%d",
			 dev->data->port_id + p);

		if (p == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = ark->eth_dev;
			continue;
		}

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(ERR,
				    "Could not allocate eth_dev for port %d\n",
				    p);
			goto error;
		}

		eth_dev->device = &pci_dev->device;
		eth_dev->data->dev_private = ark;
		eth_dev->dev_ops = ark->eth_dev->dev_ops;
		eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
		eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(eth_dev, pci_dev);

		eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
		if (!eth_dev->data->mac_addrs) {
			PMD_DRV_LOG(ERR,
				    "Memory allocation for MAC failed!"
				    " Exiting.\n");
			goto error;
		}

		if (ark->user_ext.dev_init) {
			ark->user_data[eth_dev->data->port_id] =
				ark->user_ext.dev_init(dev, ark->a_bar, p);
		}
	}

	return ret;

error:
	if (dev->data->mac_addrs)
		rte_free(dev->data->mac_addrs);
	return -1;
}

/*
 * Initial device configuration when the device is opened:
 * set up the DDM and UDM.
 * Called once per PCIe device.
 */
static int
ark_config_device(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	uint16_t num_q, i;
	struct ark_mpu_t *mpu;

	/*
	 * Make sure that the packet director, generator and checker are in a
	 * known state
	 */
	ark->start_pg = 0;
	ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
	ark_pktgen_reset(ark->pg);
	ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
	ark_pktchkr_stop(ark->pc);
	ark->pd = ark_pktdir_init(ark->pktdir.v);

	/* Verify HW */
	if (ark_udm_verify(ark->udm.v))
		return -1;
	if (ark_ddm_verify(ark->ddm.v))
		return -1;

	/* UDM */
	if (ark_udm_reset(ark->udm.v)) {
		PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n");
		return -1;
	}
	/* Keep in reset until the MPU are cleared */

	/* MPU reset */
	mpu = ark->mpurx.v;
	num_q = ark_api_num_queues(mpu);
	ark->rx_queues = num_q;
	for (i = 0; i < num_q; i++) {
		ark_mpu_reset(mpu);
		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
	}

	ark_udm_stop(ark->udm.v, 0);
	ark_udm_configure(ark->udm.v,
			  RTE_PKTMBUF_HEADROOM,
			  RTE_MBUF_DEFAULT_DATAROOM,
			  ARK_RX_WRITE_TIME_NS);
	ark_udm_stats_reset(ark->udm.v);
	ark_udm_stop(ark->udm.v, 0);

	/* TX -- DDM */
	if (ark_ddm_stop(ark->ddm.v, 1))
		PMD_DRV_LOG(ERR, "Unable to stop DDM\n");

	mpu = ark->mputx.v;
	num_q = ark_api_num_queues(mpu);
	ark->tx_queues = num_q;
	for (i = 0; i < num_q; i++) {
		ark_mpu_reset(mpu);
		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
	}

	ark_ddm_reset(ark->ddm.v);
	ark_ddm_stats_reset(ark->ddm.v);

	ark_ddm_stop(ark->ddm.v, 0);
	ark_rqp_stats_reset(ark->rqpacing);

	return 0;
}

static int
eth_ark_dev_uninit(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (ark->user_ext.dev_uninit)
		ark->user_ext.dev_uninit(dev,
			ark->user_data[dev->data->port_id]);

	ark_pktgen_uninit(ark->pg);
	ark_pktchkr_uninit(ark->pc);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	rte_free(dev->data->mac_addrs);
	return 0;
}

static int
eth_ark_dev_configure(struct rte_eth_dev *dev)
{
	PMD_FUNC_LOG(DEBUG, "\n");
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	eth_ark_dev_set_link_up(dev);
	if (ark->user_ext.dev_configure)
		return ark->user_ext.dev_configure(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static void *
delay_pg_start(void *arg)
{
	struct ark_adapter *ark = (struct ark_adapter *)arg;

	/* This function is used exclusively for regression testing.  We
	 * perform a blind sleep here to ensure that the external test
	 * application has time to set up the test before we generate packets.
	 */
	usleep(100000);
	ark_pktgen_run(ark->pg);
	return NULL;
}

static int
eth_ark_dev_start(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	int i;

	PMD_FUNC_LOG(DEBUG, "\n");

	/* RX Side */
	/* start UDM */
	ark_udm_start(ark->udm.v);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_start_queue(dev, i);

	/* TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_ark_tx_queue_start(dev, i);

	/* start DDM */
	ark_ddm_start(ark->ddm.v);

	ark->started = 1;
	/* set xmit and receive function */
	dev->rx_pkt_burst = &eth_ark_recv_pkts;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts;

	if (ark->start_pg)
		ark_pktchkr_run(ark->pc);

	if (ark->start_pg && (dev->data->port_id == 0)) {
		pthread_t thread;

		/* Delay the packet generator start to allow the hardware to
		 * be ready.  This is only used for sanity checking with the
		 * internal generator.
		 */
		if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
			PMD_DRV_LOG(ERR, "Could not create pktgen "
				    "starter thread\n");
			return -1;
		}
	}

	if (ark->user_ext.dev_start)
		ark->user_ext.dev_start(dev,
			ark->user_data[dev->data->port_id]);

	return 0;
}

static void
eth_ark_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;
	int status;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct ark_mpu_t *mpu;

	PMD_FUNC_LOG(DEBUG, "\n");

	if (ark->started == 0)
		return;
	ark->started = 0;

	/* Stop the extension first */
	if (ark->user_ext.dev_stop)
		ark->user_ext.dev_stop(dev,
			ark->user_data[dev->data->port_id]);

	/* Stop the packet generator */
	if (ark->start_pg)
		ark_pktgen_pause(ark->pg);

	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;

	/* STOP TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		status = eth_ark_tx_queue_stop(dev, i);
		if (status != 0) {
			uint8_t port = dev->data->port_id;
			PMD_DRV_LOG(ERR,
				    "tx_queue stop anomaly"
				    " port %u, queue %u\n",
				    port, i);
		}
	}

	/* Stop DDM */
	/* Wait up to 0.1 second; each stop attempt is up to 1000 * 10 useconds */
	for (i = 0; i < 10; i++) {
		status = ark_ddm_stop(ark->ddm.v, 1);
		if (status == 0)
			break;
	}
	if (status || i != 0) {
		PMD_DRV_LOG(ERR, "DDM stop anomaly. status:"
			    " %d iter: %u. (%s)\n",
			    status,
			    i,
			    __func__);
		ark_ddm_dump(ark->ddm.v, "Stop anomaly");

		mpu = ark->mputx.v;
		for (i = 0; i < ark->tx_queues; i++) {
			ark_mpu_dump(mpu, "DDM failure dump", i);
			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
		}
	}

	/* STOP RX Side */
	/* Stop the UDM; multiple attempts may be needed */
	for (i = 0; i < 10; i++) {
		status = ark_udm_stop(ark->udm.v, 1);
		if (status == 0)
			break;
	}
	if (status || i != 0) {
		PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u."
			    " (%s)\n",
			    status, i, __func__);
		ark_udm_dump(ark->udm.v, "Stop anomaly");

		mpu = ark->mpurx.v;
		for (i = 0; i < ark->rx_queues; i++) {
			ark_mpu_dump(mpu, "UDM Stop anomaly", i);
			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
		}
	}

	ark_udm_dump_stats(ark->udm.v, "Post stop");
	ark_udm_dump_perf(ark->udm.v, "Post stop");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_dump_queue(dev, i, __func__);

	/* Stop the packet checker if it is running */
	if (ark->start_pg) {
		ark_pktchkr_dump_stats(ark->pc);
		ark_pktchkr_stop(ark->pc);
	}
}

static void
eth_ark_dev_close(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	uint16_t i;

	if (ark->user_ext.dev_close)
		ark->user_ext.dev_close(dev,
			ark->user_data[dev->data->port_id]);

	eth_ark_dev_stop(dev);
	eth_ark_udm_force_close(dev);

	/*
	 * TODO: This should only be called once for the device during shutdown
	 */
	ark_rqp_dump(ark->rqpacing);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_ark_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = 0;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = 0;
	}
}

static void
eth_ark_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
	struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
	uint16_t ports = ark->num_ports;

	dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
	dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;

	dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
	dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_RX_MAX_QUEUE,
		.nb_min = ARK_RX_MIN_QUEUE,
		.nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_TX_MAX_QUEUE,
		.nb_min = ARK_TX_MIN_QUEUE,
		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */

	/* ARK PMD supports all line rates; how do we indicate that here?
	 */
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G |
				ETH_LINK_SPEED_25G |
				ETH_LINK_SPEED_40G |
				ETH_LINK_SPEED_50G |
				ETH_LINK_SPEED_100G);
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
}

static int
eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
		      dev->data->dev_link.link_status);
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.link_update) {
		return ark->user_ext.link_update
			(dev, wait_to_complete,
			 ark->user_data[dev->data->port_id]);
	}
	return 0;
}

static int
eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.dev_set_link_up)
		return ark->user_ext.dev_set_link_up(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.dev_set_link_down)
		return ark->user_ext.dev_set_link_down(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static void
eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;
	stats->imissed = 0;
	stats->oerrors = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
	if (ark->user_ext.stats_get)
		ark->user_ext.stats_get(dev, stats,
			ark->user_data[dev->data->port_id]);
}

static void
eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
	if (ark->user_ext.stats_reset)
		ark->user_ext.stats_reset(dev,
			ark->user_data[dev->data->port_id]);
}

static int
eth_ark_macaddr_add(struct rte_eth_dev *dev,
		    struct ether_addr *mac_addr,
		    uint32_t index,
		    uint32_t pool)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_add) {
		ark->user_ext.mac_addr_add(dev,
					   mac_addr,
					   index,
					   pool,
			ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

static void
eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_remove)
		ark->user_ext.mac_addr_remove(dev, index,
			ark->user_data[dev->data->port_id]);
}

static void
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_set)
		ark->user_ext.mac_addr_set(dev, mac_addr,
			ark->user_data[dev->data->port_id]);
}

static int
eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.set_mtu)
		return ark->user_ext.set_mtu(dev, size,
			ark->user_data[dev->data->port_id]);

	return -ENOTSUP;
}

static inline int
process_pktdir_arg(const char *key, const char *value,
		   void *extra_args)
{
	PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
		     key, value);
	struct ark_adapter *ark =
		(struct ark_adapter *)extra_args;

	ark->pkt_dir_v = strtol(value, NULL, 16);
	PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
	return 0;
}

static inline int
process_file_args(const char *key, const char *value, void *extra_args)
{
	PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
		     key, value);
	char *args = (char *)extra_args;

	/* Open the configuration file */
	FILE *file = fopen(value, "r");
	char line[ARK_MAX_ARG_LEN];
	int size = 0;
	int first = 1;

	if (file == NULL) {
		PMD_DRV_LOG(ERR, "Unable to open "
			    "config file %s\n", value);
		return -1;
	}

	while (fgets(line, sizeof(line), file)) {
		size += strlen(line);
		if (size >= ARK_MAX_ARG_LEN) {
			PMD_DRV_LOG(ERR, "Unable to parse file %s args, "
				    "parameter list is too long\n", value);
			fclose(file);
			return -1;
		}
		if (first) {
			strncpy(args, line, ARK_MAX_ARG_LEN);
			first = 0;
		} else {
			strncat(args, line, ARK_MAX_ARG_LEN);
		}
	}
	PMD_FUNC_LOG(DEBUG, "file = %s\n", args);
	fclose(file);
	return 0;
}

static int
eth_ark_check_args(struct ark_adapter *ark, const char *params)
{
	struct rte_kvargs *kvlist;
	unsigned int k_idx;
	struct rte_kvargs_pair *pair = NULL;
	int ret = -1;

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return 0;

	ark->pkt_gen_args[0] = 0;
	ark->pkt_chkr_args[0] = 0;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
			     pair->key,
			     pair->value);
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTDIR_ARG,
			       &process_pktdir_arg,
			       ark) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTGEN_ARG,
			       &process_file_args,
			       ark->pkt_gen_args) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTCHKR_ARG,
			       &process_file_args,
			       ark->pkt_chkr_args) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
		goto free_kvlist;
	}

	PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
	/* Setup the packet director */
	ark_pktdir_setup(ark->pd, ark->pkt_dir_v);

	/* Setup the packet generator */
	if (ark->pkt_gen_args[0]) {
		PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
		ark_pktgen_parse(ark->pkt_gen_args);
		ark_pktgen_reset(ark->pg);
		ark_pktgen_setup(ark->pg);
		ark->start_pg = 1;
	}

	/* Setup the packet checker */
	if (ark->pkt_chkr_args[0]) {
		ark_pktchkr_parse(ark->pkt_chkr_args);
		ark_pktchkr_setup(ark->pc);
	}

	ret = 0;

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
RTE_PMD_REGISTER_PARAM_STRING(net_ark,
			      ARK_PKTGEN_ARG "=<filename> "
			      ARK_PKTCHKR_ARG "=<filename> "
			      ARK_PKTDIR_ARG "=<bitmap>");
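
/*
 * Usage sketch (illustrative only -- the device address, file names, and a
 * meaningful Pkt_dir bitmap depend on the target system, and the EAL PCI
 * whitelist syntax is assumed here):
 *
 *   ARK_EXT_PATH=./libark_ext.so \
 *   testpmd -w 0000:01:00.0,Pkt_dir=0x1,Pkt_gen=./pktgen.conf -- -i
 *
 * Pkt_dir takes a hexadecimal bitmap (parsed above with strtol(value, NULL,
 * 16)); Pkt_gen and Pkt_chkr take the path of a configuration file whose
 * contents are handed to the packet generator/checker parsers.  These
 * devargs, like the ARK_EXT_PATH extension, are intended for internal
 * testing only.
 */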