/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Atomic Rules LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <sys/stat.h>
#include <dlfcn.h>

#include <rte_bus_pci.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>

#include "ark_global.h"
#include "ark_logs.h"
#include "ark_ethdev_tx.h"
#include "ark_ethdev_rx.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_udm.h"
#include "ark_rqp.h"
#include "ark_pktdir.h"
#include "ark_pktgen.h"
#include "ark_pktchkr.h"

/* Internal prototypes */
static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
static int eth_ark_dev_init(struct rte_eth_dev *dev);
static int ark_config_device(struct rte_eth_dev *dev);
static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
static int eth_ark_dev_configure(struct rte_eth_dev *dev);
static int eth_ark_dev_start(struct rte_eth_dev *dev);
static void eth_ark_dev_stop(struct rte_eth_dev *dev);
static void eth_ark_dev_close(struct rte_eth_dev *dev);
static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
					struct ether_addr *mac_addr);
static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
			       struct ether_addr *mac_addr,
			       uint32_t index,
			       uint32_t pool);
static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
				   uint32_t index);
static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);

/*
 * The packet generator is a functional block used to generate packet
 * patterns for testing.  It is not intended for nominal use.
 */
#define ARK_PKTGEN_ARG "Pkt_gen"

/*
 * The packet checker is a functional block used to verify packet
 * patterns for testing.  It is not intended for nominal use.
 */
#define ARK_PKTCHKR_ARG "Pkt_chkr"

/*
 * The packet director is used to select the internal ingress and
 * egress packet paths during testing.  It is not intended for
 * nominal use.
 */
#define ARK_PKTDIR_ARG "Pkt_dir"
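/*
 * Illustrative only: the three arguments above are passed to the PMD as
 * PCI device arguments on the EAL command line, e.g. (hypothetical BDF,
 * bitmap, and file names):
 *   -w 0000:01:00.0,Pkt_dir=0x1,Pkt_gen=pg.conf,Pkt_chkr=pc.conf
 * See RTE_PMD_REGISTER_PARAM_STRING() at the end of this file for the
 * accepted parameter names.
 */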
/* Devinfo configurations */
#define ARK_RX_MAX_QUEUE (4096 * 4)
#define ARK_RX_MIN_QUEUE (512)
#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
#define ARK_RX_MIN_BUFSIZE (1024)

#define ARK_TX_MAX_QUEUE (4096 * 4)
#define ARK_TX_MIN_QUEUE (256)

static const char * const valid_arguments[] = {
	ARK_PKTGEN_ARG,
	ARK_PKTCHKR_ARG,
	ARK_PKTDIR_ARG,
	NULL
};

static const struct rte_pci_id pci_id_ark_map[] = {
	{RTE_PCI_DEVICE(0x1d6c, 0x100d)},
	{RTE_PCI_DEVICE(0x1d6c, 0x100e)},
	{.vendor_id = 0, /* sentinel */ },
};

static int
eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));

	if (eth_dev == NULL)
		return -ENOMEM;

	ret = eth_ark_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_pci_release(eth_dev);

	return ret;
}

static int
eth_ark_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
}

static struct rte_pci_driver rte_ark_pmd = {
	.id_table = pci_id_ark_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ark_pci_probe,
	.remove = eth_ark_pci_remove,
};

static const struct eth_dev_ops ark_eth_dev_ops = {
	.dev_configure = eth_ark_dev_configure,
	.dev_start = eth_ark_dev_start,
	.dev_stop = eth_ark_dev_stop,
	.dev_close = eth_ark_dev_close,

	.dev_infos_get = eth_ark_dev_info_get,

	.rx_queue_setup = eth_ark_dev_rx_queue_setup,
	.rx_queue_count = eth_ark_dev_rx_queue_count,
	.tx_queue_setup = eth_ark_tx_queue_setup,

	.link_update = eth_ark_dev_link_update,
	.dev_set_link_up = eth_ark_dev_set_link_up,
	.dev_set_link_down = eth_ark_dev_set_link_down,

	.rx_queue_start = eth_ark_rx_start_queue,
	.rx_queue_stop = eth_ark_rx_stop_queue,

	.tx_queue_start = eth_ark_tx_queue_start,
	.tx_queue_stop = eth_ark_tx_queue_stop,

	.stats_get = eth_ark_dev_stats_get,
	.stats_reset = eth_ark_dev_stats_reset,

	.mac_addr_add = eth_ark_macaddr_add,
	.mac_addr_remove = eth_ark_macaddr_remove,
	.mac_addr_set = eth_ark_set_default_mac_addr,

	.mtu_set = eth_ark_set_mtu,
};
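/*
 * check_for_ext() below loads an optional user extension shared object
 * named by the ARK_EXT_PATH environment variable and resolves the hook
 * symbols stored in struct ark_user_ext.  A minimal sketch of one such
 * hook, assuming the signature implied by the dlsym() cast below (the
 * helper name and body are illustrative only):
 *
 *   void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id)
 *   {
 *       // Return a per-port user context; returning NULL causes the
 *       // PMD to continue without the extension.
 *       return my_port_context_for(port_id);
 *   }
 */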
static int
check_for_ext(struct ark_adapter *ark)
{
	int found = 0;

	/* Get the env */
	const char *dllpath = getenv("ARK_EXT_PATH");

	if (dllpath == NULL) {
		PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n");
		return 0;
	}
	PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath);

	/* Open and load the .so */
	ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
	if (ark->d_handle == NULL) {
		PMD_DRV_LOG(ERR, "Could not load user extension %s\n",
			    dllpath);
		return -1;
	}
	PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n",
		    dllpath);

	/* Get the entry points */
	ark->user_ext.dev_init =
		(void *(*)(struct rte_eth_dev *, void *, int))
		dlsym(ark->d_handle, "dev_init");
	PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n",
		      ark->user_ext.dev_init);
	ark->user_ext.dev_get_port_count =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_get_port_count");
	ark->user_ext.dev_uninit =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_uninit");
	ark->user_ext.dev_configure =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_configure");
	ark->user_ext.dev_start =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_start");
	ark->user_ext.dev_stop =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_stop");
	ark->user_ext.dev_close =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_close");
	ark->user_ext.link_update =
		(int (*)(struct rte_eth_dev *, int, void *))
		dlsym(ark->d_handle, "link_update");
	ark->user_ext.dev_set_link_up =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_set_link_up");
	ark->user_ext.dev_set_link_down =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_set_link_down");
	ark->user_ext.stats_get =
		(int (*)(struct rte_eth_dev *, struct rte_eth_stats *,
			 void *))
		dlsym(ark->d_handle, "stats_get");
	ark->user_ext.stats_reset =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "stats_reset");
	ark->user_ext.mac_addr_add =
		(void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t,
			  uint32_t, void *))
		dlsym(ark->d_handle, "mac_addr_add");
	ark->user_ext.mac_addr_remove =
		(void (*)(struct rte_eth_dev *, uint32_t, void *))
		dlsym(ark->d_handle, "mac_addr_remove");
	ark->user_ext.mac_addr_set =
		(void (*)(struct rte_eth_dev *, struct ether_addr *,
			  void *))
		dlsym(ark->d_handle, "mac_addr_set");
	ark->user_ext.set_mtu =
		(int (*)(struct rte_eth_dev *, uint16_t,
			 void *))
		dlsym(ark->d_handle, "set_mtu");

	return found;
}
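/*
 * First-time initialization of an Arkville PCI device: map the BAR0
 * functional blocks, sanity-check the hardware, configure the DDM and
 * UDM, and allocate one ethdev per port reported by the optional user
 * extension.
 */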
static int
eth_ark_dev_init(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	int ret;
	int port_count = 1;
	int p;

	ark->eth_dev = dev;

	PMD_FUNC_LOG(DEBUG, "\n");

	/* Check to see if there is an extension that we need to load */
	ret = check_for_ext(ark);
	if (ret)
		return ret;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	rte_eth_copy_pci_info(dev, pci_dev);

	/* Use dummy function until setup */
	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;

	ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
	ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;

	ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
	ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
	ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
	ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
	ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
	ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
	ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
	ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
	ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
	ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];

	ark->rqpacing =
		(struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
	ark->started = 0;

	PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
		      ark->sysctrl.t32[4],
		      rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
	PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n",
		    rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));

	/* If HW sanity test fails, return an error */
	if (ark->sysctrl.t32[4] != 0xcafef00d) {
		PMD_DRV_LOG(ERR,
			    "HW Sanity test has failed, expected constant"
			    " 0x%x, read 0x%x (%s)\n",
			    0xcafef00d,
			    ark->sysctrl.t32[4], __func__);
		return -1;
	}
	if (ark->sysctrl.t32[3] != 0) {
		if (ark_rqp_lasped(ark->rqpacing)) {
			PMD_DRV_LOG(ERR, "Arkville Evaluation System - "
				    "Timer has Expired\n");
			return -1;
		}
		PMD_DRV_LOG(WARNING, "Arkville Evaluation System - "
			    "Timer is Running\n");
	}

	PMD_DRV_LOG(INFO,
		    "HW Sanity test has PASSED, expected constant"
		    " 0x%x, read 0x%x (%s)\n",
		    0xcafef00d, ark->sysctrl.t32[4], __func__);

	/* We are a single function multi-port device. */
	ret = ark_config_device(dev);
	dev->dev_ops = &ark_eth_dev_ops;

	dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
	if (!dev->data->mac_addrs) {
		PMD_DRV_LOG(ERR,
			    "Failed to allocate memory for storing mac address"
			    );
	}

	if (ark->user_ext.dev_init) {
		ark->user_data[dev->data->port_id] =
			ark->user_ext.dev_init(dev, ark->a_bar, 0);
		if (!ark->user_data[dev->data->port_id]) {
			PMD_DRV_LOG(INFO,
				    "Failed to initialize PMD extension,"
				    " continuing without it\n");
			memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
			dlclose(ark->d_handle);
		}
	}

	if (pci_dev->device.devargs)
		ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
	else
		PMD_DRV_LOG(INFO, "No Device args found\n");

	if (ret)
		goto error;
	/*
	 * We will create additional devices based on the number of requested
	 * ports
	 */
	if (ark->user_ext.dev_get_port_count)
		port_count =
			ark->user_ext.dev_get_port_count(dev,
				ark->user_data[dev->data->port_id]);
	ark->num_ports = port_count;

	for (p = 0; p < port_count; p++) {
		struct rte_eth_dev *eth_dev;
		char name[RTE_ETH_NAME_MAX_LEN];

		snprintf(name, sizeof(name), "arketh%d",
			 dev->data->port_id + p);

		if (p == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = ark->eth_dev;
			continue;
		}

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(ERR,
				    "Could not allocate eth_dev for port %d\n",
				    p);
			goto error;
		}

		eth_dev->device = &pci_dev->device;
		eth_dev->data->dev_private = ark;
		eth_dev->dev_ops = ark->eth_dev->dev_ops;
		eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
		eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(eth_dev, pci_dev);

		eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
		if (!eth_dev->data->mac_addrs) {
			PMD_DRV_LOG(ERR,
				    "Memory allocation for MAC failed!"
				    " Exiting.\n");
			goto error;
		}

		if (ark->user_ext.dev_init) {
			ark->user_data[eth_dev->data->port_id] =
				ark->user_ext.dev_init(dev, ark->a_bar, p);
		}

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;

error:
	if (dev->data->mac_addrs)
		rte_free(dev->data->mac_addrs);
	return -1;
}
417 " Exiting.\n"); 418 goto error; 419 } 420 421 if (ark->user_ext.dev_init) { 422 ark->user_data[eth_dev->data->port_id] = 423 ark->user_ext.dev_init(dev, ark->a_bar, p); 424 } 425 426 rte_eth_dev_probing_finish(eth_dev); 427 } 428 429 return ret; 430 431 error: 432 if (dev->data->mac_addrs) 433 rte_free(dev->data->mac_addrs); 434 return -1; 435 } 436 437 /* 438 *Initial device configuration when device is opened 439 * setup the DDM, and UDM 440 * Called once per PCIE device 441 */ 442 static int 443 ark_config_device(struct rte_eth_dev *dev) 444 { 445 struct ark_adapter *ark = 446 (struct ark_adapter *)dev->data->dev_private; 447 uint16_t num_q, i; 448 struct ark_mpu_t *mpu; 449 450 /* 451 * Make sure that the packet director, generator and checker are in a 452 * known state 453 */ 454 ark->start_pg = 0; 455 ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1); 456 if (ark->pg == NULL) 457 return -1; 458 ark_pktgen_reset(ark->pg); 459 ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1); 460 if (ark->pc == NULL) 461 return -1; 462 ark_pktchkr_stop(ark->pc); 463 ark->pd = ark_pktdir_init(ark->pktdir.v); 464 if (ark->pd == NULL) 465 return -1; 466 467 /* Verify HW */ 468 if (ark_udm_verify(ark->udm.v)) 469 return -1; 470 if (ark_ddm_verify(ark->ddm.v)) 471 return -1; 472 473 /* UDM */ 474 if (ark_udm_reset(ark->udm.v)) { 475 PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n"); 476 return -1; 477 } 478 /* Keep in reset until the MPU are cleared */ 479 480 /* MPU reset */ 481 mpu = ark->mpurx.v; 482 num_q = ark_api_num_queues(mpu); 483 ark->rx_queues = num_q; 484 for (i = 0; i < num_q; i++) { 485 ark_mpu_reset(mpu); 486 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 487 } 488 489 ark_udm_stop(ark->udm.v, 0); 490 ark_udm_configure(ark->udm.v, 491 RTE_PKTMBUF_HEADROOM, 492 RTE_MBUF_DEFAULT_DATAROOM, 493 ARK_RX_WRITE_TIME_NS); 494 ark_udm_stats_reset(ark->udm.v); 495 ark_udm_stop(ark->udm.v, 0); 496 497 /* TX -- DDM */ 498 if (ark_ddm_stop(ark->ddm.v, 1)) 499 PMD_DRV_LOG(ERR, "Unable to stop DDM\n"); 500 501 mpu = ark->mputx.v; 502 num_q = ark_api_num_queues(mpu); 503 ark->tx_queues = num_q; 504 for (i = 0; i < num_q; i++) { 505 ark_mpu_reset(mpu); 506 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 507 } 508 509 ark_ddm_reset(ark->ddm.v); 510 ark_ddm_stats_reset(ark->ddm.v); 511 512 ark_ddm_stop(ark->ddm.v, 0); 513 ark_rqp_stats_reset(ark->rqpacing); 514 515 return 0; 516 } 517 518 static int 519 eth_ark_dev_uninit(struct rte_eth_dev *dev) 520 { 521 struct ark_adapter *ark = 522 (struct ark_adapter *)dev->data->dev_private; 523 524 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 525 return 0; 526 527 if (ark->user_ext.dev_uninit) 528 ark->user_ext.dev_uninit(dev, 529 ark->user_data[dev->data->port_id]); 530 531 ark_pktgen_uninit(ark->pg); 532 ark_pktchkr_uninit(ark->pc); 533 534 dev->dev_ops = NULL; 535 dev->rx_pkt_burst = NULL; 536 dev->tx_pkt_burst = NULL; 537 rte_free(dev->data->mac_addrs); 538 return 0; 539 } 540 541 static int 542 eth_ark_dev_configure(struct rte_eth_dev *dev) 543 { 544 PMD_FUNC_LOG(DEBUG, "\n"); 545 struct ark_adapter *ark = 546 (struct ark_adapter *)dev->data->dev_private; 547 548 eth_ark_dev_set_link_up(dev); 549 if (ark->user_ext.dev_configure) 550 return ark->user_ext.dev_configure(dev, 551 ark->user_data[dev->data->port_id]); 552 return 0; 553 } 554 555 static void * 556 delay_pg_start(void *arg) 557 { 558 struct ark_adapter *ark = (struct ark_adapter *)arg; 559 560 /* This function is used exclusively for regression testing, We 561 * perform a blind sleep here to ensure that the external 
static void *
delay_pg_start(void *arg)
{
	struct ark_adapter *ark = (struct ark_adapter *)arg;

	/* This function is used exclusively for regression testing.  We
	 * perform a blind sleep here to ensure that the external test
	 * application has time to set up the test before we generate packets.
	 */
	usleep(100000);
	ark_pktgen_run(ark->pg);
	return NULL;
}

static int
eth_ark_dev_start(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	int i;

	PMD_FUNC_LOG(DEBUG, "\n");

	/* RX Side */
	/* start UDM */
	ark_udm_start(ark->udm.v);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_start_queue(dev, i);

	/* TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_ark_tx_queue_start(dev, i);

	/* start DDM */
	ark_ddm_start(ark->ddm.v);

	ark->started = 1;
	/* set xmit and receive function */
	dev->rx_pkt_burst = &eth_ark_recv_pkts;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts;

	if (ark->start_pg)
		ark_pktchkr_run(ark->pc);

	if (ark->start_pg && (dev->data->port_id == 0)) {
		pthread_t thread;

		/* Delay packet generator start to allow the hardware to be
		 * ready.  This is only used for sanity checking with the
		 * internal generator.
		 */
		if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
			PMD_DRV_LOG(ERR, "Could not create pktgen "
				    "starter thread\n");
			return -1;
		}
	}

	if (ark->user_ext.dev_start)
		ark->user_ext.dev_start(dev,
			ark->user_data[dev->data->port_id]);

	return 0;
}
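/*
 * Stop sequence: quiesce the user extension and the internal packet
 * generator first, swap in the no-op burst functions, then stop the TX
 * queues and DDM followed by the RX UDM and queues, dumping MPU state
 * if either engine fails to stop cleanly.
 */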
(%s)\n", 690 status, i, __func__); 691 ark_udm_dump(ark->udm.v, "Stop anomaly"); 692 693 mpu = ark->mpurx.v; 694 for (i = 0; i < ark->rx_queues; i++) { 695 ark_mpu_dump(mpu, "UDM Stop anomaly", i); 696 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 697 } 698 } 699 700 ark_udm_dump_stats(ark->udm.v, "Post stop"); 701 ark_udm_dump_perf(ark->udm.v, "Post stop"); 702 703 for (i = 0; i < dev->data->nb_rx_queues; i++) 704 eth_ark_rx_dump_queue(dev, i, __func__); 705 706 /* Stop the packet checker if it is running */ 707 if (ark->start_pg) { 708 ark_pktchkr_dump_stats(ark->pc); 709 ark_pktchkr_stop(ark->pc); 710 } 711 } 712 713 static void 714 eth_ark_dev_close(struct rte_eth_dev *dev) 715 { 716 struct ark_adapter *ark = 717 (struct ark_adapter *)dev->data->dev_private; 718 uint16_t i; 719 720 if (ark->user_ext.dev_close) 721 ark->user_ext.dev_close(dev, 722 ark->user_data[dev->data->port_id]); 723 724 eth_ark_dev_stop(dev); 725 eth_ark_udm_force_close(dev); 726 727 /* 728 * TODO This should only be called once for the device during shutdown 729 */ 730 ark_rqp_dump(ark->rqpacing); 731 732 for (i = 0; i < dev->data->nb_tx_queues; i++) { 733 eth_ark_tx_queue_release(dev->data->tx_queues[i]); 734 dev->data->tx_queues[i] = 0; 735 } 736 737 for (i = 0; i < dev->data->nb_rx_queues; i++) { 738 eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]); 739 dev->data->rx_queues[i] = 0; 740 } 741 } 742 743 static void 744 eth_ark_dev_info_get(struct rte_eth_dev *dev, 745 struct rte_eth_dev_info *dev_info) 746 { 747 struct ark_adapter *ark = 748 (struct ark_adapter *)dev->data->dev_private; 749 struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE); 750 struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE); 751 uint16_t ports = ark->num_ports; 752 753 dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN; 754 dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE; 755 756 dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports); 757 dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports); 758 759 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 760 .nb_max = ARK_RX_MAX_QUEUE, 761 .nb_min = ARK_RX_MIN_QUEUE, 762 .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */ 763 764 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 765 .nb_max = ARK_TX_MAX_QUEUE, 766 .nb_min = ARK_TX_MIN_QUEUE, 767 .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */ 768 769 /* ARK PMD supports all line rates, how do we indicate that here ?? 
static void
eth_ark_dev_close(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	uint16_t i;

	if (ark->user_ext.dev_close)
		ark->user_ext.dev_close(dev,
			ark->user_data[dev->data->port_id]);

	eth_ark_dev_stop(dev);
	eth_ark_udm_force_close(dev);

	/*
	 * TODO This should only be called once for the device during shutdown
	 */
	ark_rqp_dump(ark->rqpacing);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_ark_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = 0;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = 0;
	}
}

static void
eth_ark_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
	struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
	uint16_t ports = ark->num_ports;

	dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
	dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;

	dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
	dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_RX_MAX_QUEUE,
		.nb_min = ARK_RX_MIN_QUEUE,
		.nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_TX_MAX_QUEUE,
		.nb_min = ARK_TX_MIN_QUEUE,
		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */

	/* ARK PMD supports all line rates; how do we indicate that here? */
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G |
				ETH_LINK_SPEED_25G |
				ETH_LINK_SPEED_40G |
				ETH_LINK_SPEED_50G |
				ETH_LINK_SPEED_100G);
}

static int
eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
		      dev->data->dev_link.link_status);
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.link_update) {
		return ark->user_ext.link_update
			(dev, wait_to_complete,
			 ark->user_data[dev->data->port_id]);
	}
	return 0;
}

static int
eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.dev_set_link_up)
		return ark->user_ext.dev_set_link_up(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.dev_set_link_down)
		return ark->user_ext.dev_set_link_down(dev,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;
	stats->imissed = 0;
	stats->oerrors = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
	if (ark->user_ext.stats_get)
		return ark->user_ext.stats_get(dev, stats,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static void
eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
	if (ark->user_ext.stats_reset)
		ark->user_ext.stats_reset(dev,
			ark->user_data[dev->data->port_id]);
}
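/*
 * The MAC address and MTU operations below are not implemented in
 * hardware by this PMD; they are forwarded to the user extension when
 * one is loaded, and the int-returning ops report -ENOTSUP otherwise.
 */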
static int
eth_ark_macaddr_add(struct rte_eth_dev *dev,
		    struct ether_addr *mac_addr,
		    uint32_t index,
		    uint32_t pool)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_add) {
		ark->user_ext.mac_addr_add(dev,
					   mac_addr,
					   index,
					   pool,
			   ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

static void
eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_remove)
		ark->user_ext.mac_addr_remove(dev, index,
			ark->user_data[dev->data->port_id]);
}

static int
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_set) {
		ark->user_ext.mac_addr_set(dev, mac_addr,
			ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

static int
eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.set_mtu)
		return ark->user_ext.set_mtu(dev, size,
			ark->user_data[dev->data->port_id]);

	return -ENOTSUP;
}

static inline int
process_pktdir_arg(const char *key, const char *value,
		   void *extra_args)
{
	PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
		     key, value);
	struct ark_adapter *ark =
		(struct ark_adapter *)extra_args;

	ark->pkt_dir_v = strtol(value, NULL, 16);
	PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
	return 0;
}

static inline int
process_file_args(const char *key, const char *value, void *extra_args)
{
	PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
		     key, value);
	char *args = (char *)extra_args;

	/* Open the configuration file */
	FILE *file = fopen(value, "r");
	char line[ARK_MAX_ARG_LEN];
	int size = 0;
	int first = 1;

	if (file == NULL) {
		PMD_DRV_LOG(ERR, "Unable to open "
			    "config file %s\n", value);
		return -1;
	}

	while (fgets(line, sizeof(line), file)) {
		size += strlen(line);
		if (size >= ARK_MAX_ARG_LEN) {
			PMD_DRV_LOG(ERR, "Unable to parse file %s args, "
				    "parameter list is too long\n", value);
			fclose(file);
			return -1;
		}
		if (first) {
			strncpy(args, line, ARK_MAX_ARG_LEN);
			first = 0;
		} else {
			strncat(args, line, ARK_MAX_ARG_LEN);
		}
	}
	PMD_FUNC_LOG(DEBUG, "file = %s\n", args);
	fclose(file);
	return 0;
}
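/*
 * Parse the PMD-specific device arguments (Pkt_dir, Pkt_gen, Pkt_chkr)
 * and program the packet director, generator, and checker test blocks
 * accordingly.  Returns 0 on success, -1 on a parse failure.
 */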
static int
eth_ark_check_args(struct ark_adapter *ark, const char *params)
{
	struct rte_kvargs *kvlist;
	unsigned int k_idx;
	struct rte_kvargs_pair *pair = NULL;
	int ret = -1;

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return 0;

	ark->pkt_gen_args[0] = 0;
	ark->pkt_chkr_args[0] = 0;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
			     pair->key,
			     pair->value);
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTDIR_ARG,
			       &process_pktdir_arg,
			       ark) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTGEN_ARG,
			       &process_file_args,
			       ark->pkt_gen_args) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTCHKR_ARG,
			       &process_file_args,
			       ark->pkt_chkr_args) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
		goto free_kvlist;
	}

	PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
	/* Setup the packet director */
	ark_pktdir_setup(ark->pd, ark->pkt_dir_v);

	/* Setup the packet generator */
	if (ark->pkt_gen_args[0]) {
		PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
		ark_pktgen_parse(ark->pkt_gen_args);
		ark_pktgen_reset(ark->pg);
		ark_pktgen_setup(ark->pg);
		ark->start_pg = 1;
	}

	/* Setup the packet checker */
	if (ark->pkt_chkr_args[0]) {
		ark_pktchkr_parse(ark->pkt_chkr_args);
		ark_pktchkr_setup(ark->pc);
	}

	ret = 0;

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
RTE_PMD_REGISTER_PARAM_STRING(net_ark,
			      ARK_PKTGEN_ARG "=<filename> "
			      ARK_PKTCHKR_ARG "=<filename> "
			      ARK_PKTDIR_ARG "=<bitmap>");