1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2015-2017 Atomic Rules LLC 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of copyright holder nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <unistd.h> 35 #include <sys/stat.h> 36 #include <dlfcn.h> 37 38 #include <rte_ethdev_pci.h> 39 #include <rte_kvargs.h> 40 41 #include "ark_global.h" 42 #include "ark_logs.h" 43 #include "ark_ethdev_tx.h" 44 #include "ark_ethdev_rx.h" 45 #include "ark_mpu.h" 46 #include "ark_ddm.h" 47 #include "ark_udm.h" 48 #include "ark_rqp.h" 49 #include "ark_pktdir.h" 50 #include "ark_pktgen.h" 51 #include "ark_pktchkr.h" 52 53 /* Internal prototypes */ 54 static int eth_ark_check_args(struct ark_adapter *ark, const char *params); 55 static int eth_ark_dev_init(struct rte_eth_dev *dev); 56 static int ark_config_device(struct rte_eth_dev *dev); 57 static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev); 58 static int eth_ark_dev_configure(struct rte_eth_dev *dev); 59 static int eth_ark_dev_start(struct rte_eth_dev *dev); 60 static void eth_ark_dev_stop(struct rte_eth_dev *dev); 61 static void eth_ark_dev_close(struct rte_eth_dev *dev); 62 static void eth_ark_dev_info_get(struct rte_eth_dev *dev, 63 struct rte_eth_dev_info *dev_info); 64 static int eth_ark_dev_link_update(struct rte_eth_dev *dev, 65 int wait_to_complete); 66 static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev); 67 static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev); 68 static void eth_ark_dev_stats_get(struct rte_eth_dev *dev, 69 struct rte_eth_stats *stats); 70 static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev); 71 static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, 72 struct ether_addr *mac_addr); 73 static int eth_ark_macaddr_add(struct rte_eth_dev *dev, 74 struct ether_addr *mac_addr, 75 uint32_t index, 76 uint32_t pool); 77 static void eth_ark_macaddr_remove(struct rte_eth_dev *dev, 78 uint32_t index); 79 static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size); 80 81 /* 82 * The packet generator is a functional block used to generate packet 83 * patterns for testing. It is not intended for nominal use. 84 */ 85 #define ARK_PKTGEN_ARG "Pkt_gen" 86 87 /* 88 * The packet checker is a functional block used to verify packet 89 * patterns for testing. It is not intended for nominal use. 90 */ 91 #define ARK_PKTCHKR_ARG "Pkt_chkr" 92 93 /* 94 * The packet director is used to select the internal ingress and 95 * egress packets paths during testing. It is not intended for 96 * nominal use. 97 */ 98 #define ARK_PKTDIR_ARG "Pkt_dir" 99 100 /* Devinfo configurations */ 101 #define ARK_RX_MAX_QUEUE (4096 * 4) 102 #define ARK_RX_MIN_QUEUE (512) 103 #define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128) 104 #define ARK_RX_MIN_BUFSIZE (1024) 105 106 #define ARK_TX_MAX_QUEUE (4096 * 4) 107 #define ARK_TX_MIN_QUEUE (256) 108 109 static const char * const valid_arguments[] = { 110 ARK_PKTGEN_ARG, 111 ARK_PKTCHKR_ARG, 112 ARK_PKTDIR_ARG, 113 NULL 114 }; 115 116 static const struct rte_pci_id pci_id_ark_map[] = { 117 {RTE_PCI_DEVICE(0x1d6c, 0x100d)}, 118 {RTE_PCI_DEVICE(0x1d6c, 0x100e)}, 119 {.vendor_id = 0, /* sentinel */ }, 120 }; 121 122 static int 123 eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 124 struct rte_pci_device *pci_dev) 125 { 126 struct rte_eth_dev *eth_dev; 127 int ret; 128 129 eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter)); 130 131 if (eth_dev == NULL) 132 return -ENOMEM; 133 134 ret = eth_ark_dev_init(eth_dev); 135 if (ret) 136 rte_eth_dev_pci_release(eth_dev); 137 138 return ret; 139 } 140 141 static int 142 eth_ark_pci_remove(struct rte_pci_device *pci_dev) 143 { 144 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit); 145 } 146 147 static struct rte_pci_driver rte_ark_pmd = { 148 .id_table = pci_id_ark_map, 149 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 150 .probe = eth_ark_pci_probe, 151 .remove = eth_ark_pci_remove, 152 }; 153 154 static const struct eth_dev_ops ark_eth_dev_ops = { 155 .dev_configure = eth_ark_dev_configure, 156 .dev_start = eth_ark_dev_start, 157 .dev_stop = eth_ark_dev_stop, 158 .dev_close = eth_ark_dev_close, 159 160 .dev_infos_get = eth_ark_dev_info_get, 161 162 .rx_queue_setup = eth_ark_dev_rx_queue_setup, 163 .rx_queue_count = eth_ark_dev_rx_queue_count, 164 .tx_queue_setup = eth_ark_tx_queue_setup, 165 166 .link_update = eth_ark_dev_link_update, 167 .dev_set_link_up = eth_ark_dev_set_link_up, 168 .dev_set_link_down = eth_ark_dev_set_link_down, 169 170 .rx_queue_start = eth_ark_rx_start_queue, 171 .rx_queue_stop = eth_ark_rx_stop_queue, 172 173 .tx_queue_start = eth_ark_tx_queue_start, 174 .tx_queue_stop = eth_ark_tx_queue_stop, 175 176 .stats_get = eth_ark_dev_stats_get, 177 .stats_reset = eth_ark_dev_stats_reset, 178 179 .mac_addr_add = eth_ark_macaddr_add, 180 .mac_addr_remove = eth_ark_macaddr_remove, 181 .mac_addr_set = eth_ark_set_default_mac_addr, 182 183 .mtu_set = eth_ark_set_mtu, 184 }; 185 186 static int 187 check_for_ext(struct ark_adapter *ark) 188 { 189 int found = 0; 190 191 /* Get the env */ 192 const char *dllpath = getenv("ARK_EXT_PATH"); 193 194 if (dllpath == NULL) { 195 PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n"); 196 return 0; 197 } 198 PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath); 199 200 /* Open and load the .so */ 201 ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY); 202 if (ark->d_handle == NULL) { 203 PMD_DRV_LOG(ERR, "Could not load user extension %s\n", 204 dllpath); 205 return -1; 206 } 207 PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n", 208 dllpath); 209 210 /* Get the entry points */ 211 ark->user_ext.dev_init = 212 (void *(*)(struct rte_eth_dev *, void *, int)) 213 dlsym(ark->d_handle, "dev_init"); 214 PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n", 215 ark->user_ext.dev_init); 216 ark->user_ext.dev_get_port_count = 217 (int (*)(struct rte_eth_dev *, void *)) 218 dlsym(ark->d_handle, "dev_get_port_count"); 219 ark->user_ext.dev_uninit = 220 (void (*)(struct rte_eth_dev *, void *)) 221 dlsym(ark->d_handle, "dev_uninit"); 222 ark->user_ext.dev_configure = 223 (int (*)(struct rte_eth_dev *, void *)) 224 dlsym(ark->d_handle, "dev_configure"); 225 ark->user_ext.dev_start = 226 (int (*)(struct rte_eth_dev *, void *)) 227 dlsym(ark->d_handle, "dev_start"); 228 ark->user_ext.dev_stop = 229 (void (*)(struct rte_eth_dev *, void *)) 230 dlsym(ark->d_handle, "dev_stop"); 231 ark->user_ext.dev_close = 232 (void (*)(struct rte_eth_dev *, void *)) 233 dlsym(ark->d_handle, "dev_close"); 234 ark->user_ext.link_update = 235 (int (*)(struct rte_eth_dev *, int, void *)) 236 dlsym(ark->d_handle, "link_update"); 237 ark->user_ext.dev_set_link_up = 238 (int (*)(struct rte_eth_dev *, void *)) 239 dlsym(ark->d_handle, "dev_set_link_up"); 240 ark->user_ext.dev_set_link_down = 241 (int (*)(struct rte_eth_dev *, void *)) 242 dlsym(ark->d_handle, "dev_set_link_down"); 243 ark->user_ext.stats_get = 244 (void (*)(struct rte_eth_dev *, struct rte_eth_stats *, 245 void *)) 246 dlsym(ark->d_handle, "stats_get"); 247 ark->user_ext.stats_reset = 248 (void (*)(struct rte_eth_dev *, void *)) 249 dlsym(ark->d_handle, "stats_reset"); 250 ark->user_ext.mac_addr_add = 251 (void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t, 252 uint32_t, void *)) 253 dlsym(ark->d_handle, "mac_addr_add"); 254 ark->user_ext.mac_addr_remove = 255 (void (*)(struct rte_eth_dev *, uint32_t, void *)) 256 dlsym(ark->d_handle, "mac_addr_remove"); 257 ark->user_ext.mac_addr_set = 258 (void (*)(struct rte_eth_dev *, struct ether_addr *, 259 void *)) 260 dlsym(ark->d_handle, "mac_addr_set"); 261 ark->user_ext.set_mtu = 262 (int (*)(struct rte_eth_dev *, uint16_t, 263 void *)) 264 dlsym(ark->d_handle, "set_mtu"); 265 266 return found; 267 } 268 269 static int 270 eth_ark_dev_init(struct rte_eth_dev *dev) 271 { 272 struct ark_adapter *ark = 273 (struct ark_adapter *)dev->data->dev_private; 274 struct rte_pci_device *pci_dev; 275 int ret; 276 int port_count = 1; 277 int p; 278 279 ark->eth_dev = dev; 280 281 PMD_FUNC_LOG(DEBUG, "\n"); 282 283 /* Check to see if there is an extension that we need to load */ 284 ret = check_for_ext(ark); 285 if (ret) 286 return ret; 287 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 288 rte_eth_copy_pci_info(dev, pci_dev); 289 290 /* Use dummy function until setup */ 291 dev->rx_pkt_burst = ð_ark_recv_pkts_noop; 292 dev->tx_pkt_burst = ð_ark_xmit_pkts_noop; 293 294 ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr; 295 ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr; 296 297 ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE]; 298 ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE]; 299 ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE]; 300 ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE]; 301 ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE]; 302 ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE]; 303 ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE]; 304 ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE]; 305 ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE]; 306 ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE]; 307 308 ark->rqpacing = 309 (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE); 310 ark->started = 0; 311 312 PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n", 313 ark->sysctrl.t32[4], 314 rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4])); 315 PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n", 316 rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4])); 317 318 /* If HW sanity test fails, return an error */ 319 if (ark->sysctrl.t32[4] != 0xcafef00d) { 320 PMD_DRV_LOG(ERR, 321 "HW Sanity test has failed, expected constant" 322 " 0x%x, read 0x%x (%s)\n", 323 0xcafef00d, 324 ark->sysctrl.t32[4], __func__); 325 return -1; 326 } 327 if (ark->sysctrl.t32[3] != 0) { 328 if (ark_rqp_lasped(ark->rqpacing)) { 329 PMD_DRV_LOG(ERR, "Arkville Evaluation System - " 330 "Timer has Expired\n"); 331 return -1; 332 } 333 PMD_DRV_LOG(WARNING, "Arkville Evaluation System - " 334 "Timer is Running\n"); 335 } 336 337 PMD_DRV_LOG(INFO, 338 "HW Sanity test has PASSED, expected constant" 339 " 0x%x, read 0x%x (%s)\n", 340 0xcafef00d, ark->sysctrl.t32[4], __func__); 341 342 /* We are a single function multi-port device. */ 343 ret = ark_config_device(dev); 344 dev->dev_ops = &ark_eth_dev_ops; 345 dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; 346 347 dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0); 348 if (!dev->data->mac_addrs) { 349 PMD_DRV_LOG(ERR, 350 "Failed to allocated memory for storing mac address" 351 ); 352 } 353 354 if (ark->user_ext.dev_init) { 355 ark->user_data[dev->data->port_id] = 356 ark->user_ext.dev_init(dev, ark->a_bar, 0); 357 if (!ark->user_data[dev->data->port_id]) { 358 PMD_DRV_LOG(INFO, 359 "Failed to initialize PMD extension!" 360 " continuing without it\n"); 361 memset(&ark->user_ext, 0, sizeof(struct ark_user_ext)); 362 dlclose(ark->d_handle); 363 } 364 } 365 366 if (pci_dev->device.devargs) 367 ret = eth_ark_check_args(ark, pci_dev->device.devargs->args); 368 else 369 PMD_DRV_LOG(INFO, "No Device args found\n"); 370 371 if (ret) 372 goto error; 373 /* 374 * We will create additional devices based on the number of requested 375 * ports 376 */ 377 if (ark->user_ext.dev_get_port_count) 378 port_count = 379 ark->user_ext.dev_get_port_count(dev, 380 ark->user_data[dev->data->port_id]); 381 ark->num_ports = port_count; 382 383 for (p = 0; p < port_count; p++) { 384 struct rte_eth_dev *eth_dev; 385 char name[RTE_ETH_NAME_MAX_LEN]; 386 387 snprintf(name, sizeof(name), "arketh%d", 388 dev->data->port_id + p); 389 390 if (p == 0) { 391 /* First port is already allocated by DPDK */ 392 eth_dev = ark->eth_dev; 393 continue; 394 } 395 396 /* reserve an ethdev entry */ 397 eth_dev = rte_eth_dev_allocate(name); 398 if (!eth_dev) { 399 PMD_DRV_LOG(ERR, 400 "Could not allocate eth_dev for port %d\n", 401 p); 402 goto error; 403 } 404 405 eth_dev->device = &pci_dev->device; 406 eth_dev->data->dev_private = ark; 407 eth_dev->dev_ops = ark->eth_dev->dev_ops; 408 eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst; 409 eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst; 410 411 rte_eth_copy_pci_info(eth_dev, pci_dev); 412 413 eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0); 414 if (!eth_dev->data->mac_addrs) { 415 PMD_DRV_LOG(ERR, 416 "Memory allocation for MAC failed!" 417 " Exiting.\n"); 418 goto error; 419 } 420 421 if (ark->user_ext.dev_init) { 422 ark->user_data[eth_dev->data->port_id] = 423 ark->user_ext.dev_init(dev, ark->a_bar, p); 424 } 425 } 426 427 return ret; 428 429 error: 430 if (dev->data->mac_addrs) 431 rte_free(dev->data->mac_addrs); 432 return -1; 433 } 434 435 /* 436 *Initial device configuration when device is opened 437 * setup the DDM, and UDM 438 * Called once per PCIE device 439 */ 440 static int 441 ark_config_device(struct rte_eth_dev *dev) 442 { 443 struct ark_adapter *ark = 444 (struct ark_adapter *)dev->data->dev_private; 445 uint16_t num_q, i; 446 struct ark_mpu_t *mpu; 447 448 /* 449 * Make sure that the packet director, generator and checker are in a 450 * known state 451 */ 452 ark->start_pg = 0; 453 ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1); 454 if (ark->pg == NULL) 455 return -1; 456 ark_pktgen_reset(ark->pg); 457 ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1); 458 if (ark->pc == NULL) 459 return -1; 460 ark_pktchkr_stop(ark->pc); 461 ark->pd = ark_pktdir_init(ark->pktdir.v); 462 if (ark->pd == NULL) 463 return -1; 464 465 /* Verify HW */ 466 if (ark_udm_verify(ark->udm.v)) 467 return -1; 468 if (ark_ddm_verify(ark->ddm.v)) 469 return -1; 470 471 /* UDM */ 472 if (ark_udm_reset(ark->udm.v)) { 473 PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n"); 474 return -1; 475 } 476 /* Keep in reset until the MPU are cleared */ 477 478 /* MPU reset */ 479 mpu = ark->mpurx.v; 480 num_q = ark_api_num_queues(mpu); 481 ark->rx_queues = num_q; 482 for (i = 0; i < num_q; i++) { 483 ark_mpu_reset(mpu); 484 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 485 } 486 487 ark_udm_stop(ark->udm.v, 0); 488 ark_udm_configure(ark->udm.v, 489 RTE_PKTMBUF_HEADROOM, 490 RTE_MBUF_DEFAULT_DATAROOM, 491 ARK_RX_WRITE_TIME_NS); 492 ark_udm_stats_reset(ark->udm.v); 493 ark_udm_stop(ark->udm.v, 0); 494 495 /* TX -- DDM */ 496 if (ark_ddm_stop(ark->ddm.v, 1)) 497 PMD_DRV_LOG(ERR, "Unable to stop DDM\n"); 498 499 mpu = ark->mputx.v; 500 num_q = ark_api_num_queues(mpu); 501 ark->tx_queues = num_q; 502 for (i = 0; i < num_q; i++) { 503 ark_mpu_reset(mpu); 504 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 505 } 506 507 ark_ddm_reset(ark->ddm.v); 508 ark_ddm_stats_reset(ark->ddm.v); 509 510 ark_ddm_stop(ark->ddm.v, 0); 511 ark_rqp_stats_reset(ark->rqpacing); 512 513 return 0; 514 } 515 516 static int 517 eth_ark_dev_uninit(struct rte_eth_dev *dev) 518 { 519 struct ark_adapter *ark = 520 (struct ark_adapter *)dev->data->dev_private; 521 522 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 523 return 0; 524 525 if (ark->user_ext.dev_uninit) 526 ark->user_ext.dev_uninit(dev, 527 ark->user_data[dev->data->port_id]); 528 529 ark_pktgen_uninit(ark->pg); 530 ark_pktchkr_uninit(ark->pc); 531 532 dev->dev_ops = NULL; 533 dev->rx_pkt_burst = NULL; 534 dev->tx_pkt_burst = NULL; 535 rte_free(dev->data->mac_addrs); 536 return 0; 537 } 538 539 static int 540 eth_ark_dev_configure(struct rte_eth_dev *dev) 541 { 542 PMD_FUNC_LOG(DEBUG, "\n"); 543 struct ark_adapter *ark = 544 (struct ark_adapter *)dev->data->dev_private; 545 546 eth_ark_dev_set_link_up(dev); 547 if (ark->user_ext.dev_configure) 548 return ark->user_ext.dev_configure(dev, 549 ark->user_data[dev->data->port_id]); 550 return 0; 551 } 552 553 static void * 554 delay_pg_start(void *arg) 555 { 556 struct ark_adapter *ark = (struct ark_adapter *)arg; 557 558 /* This function is used exclusively for regression testing, We 559 * perform a blind sleep here to ensure that the external test 560 * application has time to setup the test before we generate packets 561 */ 562 usleep(100000); 563 ark_pktgen_run(ark->pg); 564 return NULL; 565 } 566 567 static int 568 eth_ark_dev_start(struct rte_eth_dev *dev) 569 { 570 struct ark_adapter *ark = 571 (struct ark_adapter *)dev->data->dev_private; 572 int i; 573 574 PMD_FUNC_LOG(DEBUG, "\n"); 575 576 /* RX Side */ 577 /* start UDM */ 578 ark_udm_start(ark->udm.v); 579 580 for (i = 0; i < dev->data->nb_rx_queues; i++) 581 eth_ark_rx_start_queue(dev, i); 582 583 /* TX Side */ 584 for (i = 0; i < dev->data->nb_tx_queues; i++) 585 eth_ark_tx_queue_start(dev, i); 586 587 /* start DDM */ 588 ark_ddm_start(ark->ddm.v); 589 590 ark->started = 1; 591 /* set xmit and receive function */ 592 dev->rx_pkt_burst = ð_ark_recv_pkts; 593 dev->tx_pkt_burst = ð_ark_xmit_pkts; 594 595 if (ark->start_pg) 596 ark_pktchkr_run(ark->pc); 597 598 if (ark->start_pg && (dev->data->port_id == 0)) { 599 pthread_t thread; 600 601 /* Delay packet generatpr start allow the hardware to be ready 602 * This is only used for sanity checking with internal generator 603 */ 604 if (pthread_create(&thread, NULL, delay_pg_start, ark)) { 605 PMD_DRV_LOG(ERR, "Could not create pktgen " 606 "starter thread\n"); 607 return -1; 608 } 609 } 610 611 if (ark->user_ext.dev_start) 612 ark->user_ext.dev_start(dev, 613 ark->user_data[dev->data->port_id]); 614 615 return 0; 616 } 617 618 static void 619 eth_ark_dev_stop(struct rte_eth_dev *dev) 620 { 621 uint16_t i; 622 int status; 623 struct ark_adapter *ark = 624 (struct ark_adapter *)dev->data->dev_private; 625 struct ark_mpu_t *mpu; 626 627 PMD_FUNC_LOG(DEBUG, "\n"); 628 629 if (ark->started == 0) 630 return; 631 ark->started = 0; 632 633 /* Stop the extension first */ 634 if (ark->user_ext.dev_stop) 635 ark->user_ext.dev_stop(dev, 636 ark->user_data[dev->data->port_id]); 637 638 /* Stop the packet generator */ 639 if (ark->start_pg) 640 ark_pktgen_pause(ark->pg); 641 642 dev->rx_pkt_burst = ð_ark_recv_pkts_noop; 643 dev->tx_pkt_burst = ð_ark_xmit_pkts_noop; 644 645 /* STOP TX Side */ 646 for (i = 0; i < dev->data->nb_tx_queues; i++) { 647 status = eth_ark_tx_queue_stop(dev, i); 648 if (status != 0) { 649 uint8_t port = dev->data->port_id; 650 PMD_DRV_LOG(ERR, 651 "tx_queue stop anomaly" 652 " port %u, queue %u\n", 653 port, i); 654 } 655 } 656 657 /* Stop DDM */ 658 /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */ 659 for (i = 0; i < 10; i++) { 660 status = ark_ddm_stop(ark->ddm.v, 1); 661 if (status == 0) 662 break; 663 } 664 if (status || i != 0) { 665 PMD_DRV_LOG(ERR, "DDM stop anomaly. status:" 666 " %d iter: %u. (%s)\n", 667 status, 668 i, 669 __func__); 670 ark_ddm_dump(ark->ddm.v, "Stop anomaly"); 671 672 mpu = ark->mputx.v; 673 for (i = 0; i < ark->tx_queues; i++) { 674 ark_mpu_dump(mpu, "DDM failure dump", i); 675 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 676 } 677 } 678 679 /* STOP RX Side */ 680 /* Stop UDM multiple tries attempted */ 681 for (i = 0; i < 10; i++) { 682 status = ark_udm_stop(ark->udm.v, 1); 683 if (status == 0) 684 break; 685 } 686 if (status || i != 0) { 687 PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n", 688 status, i, __func__); 689 ark_udm_dump(ark->udm.v, "Stop anomaly"); 690 691 mpu = ark->mpurx.v; 692 for (i = 0; i < ark->rx_queues; i++) { 693 ark_mpu_dump(mpu, "UDM Stop anomaly", i); 694 mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); 695 } 696 } 697 698 ark_udm_dump_stats(ark->udm.v, "Post stop"); 699 ark_udm_dump_perf(ark->udm.v, "Post stop"); 700 701 for (i = 0; i < dev->data->nb_tx_queues; i++) 702 eth_ark_rx_dump_queue(dev, i, __func__); 703 704 /* Stop the packet checker if it is running */ 705 if (ark->start_pg) { 706 ark_pktchkr_dump_stats(ark->pc); 707 ark_pktchkr_stop(ark->pc); 708 } 709 } 710 711 static void 712 eth_ark_dev_close(struct rte_eth_dev *dev) 713 { 714 struct ark_adapter *ark = 715 (struct ark_adapter *)dev->data->dev_private; 716 uint16_t i; 717 718 if (ark->user_ext.dev_close) 719 ark->user_ext.dev_close(dev, 720 ark->user_data[dev->data->port_id]); 721 722 eth_ark_dev_stop(dev); 723 eth_ark_udm_force_close(dev); 724 725 /* 726 * TODO This should only be called once for the device during shutdown 727 */ 728 ark_rqp_dump(ark->rqpacing); 729 730 for (i = 0; i < dev->data->nb_tx_queues; i++) { 731 eth_ark_tx_queue_release(dev->data->tx_queues[i]); 732 dev->data->tx_queues[i] = 0; 733 } 734 735 for (i = 0; i < dev->data->nb_rx_queues; i++) { 736 eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]); 737 dev->data->rx_queues[i] = 0; 738 } 739 } 740 741 static void 742 eth_ark_dev_info_get(struct rte_eth_dev *dev, 743 struct rte_eth_dev_info *dev_info) 744 { 745 struct ark_adapter *ark = 746 (struct ark_adapter *)dev->data->dev_private; 747 struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE); 748 struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE); 749 uint16_t ports = ark->num_ports; 750 751 dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN; 752 dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE; 753 754 dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports); 755 dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports); 756 757 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 758 .nb_max = ARK_RX_MAX_QUEUE, 759 .nb_min = ARK_RX_MIN_QUEUE, 760 .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */ 761 762 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 763 .nb_max = ARK_TX_MAX_QUEUE, 764 .nb_min = ARK_TX_MIN_QUEUE, 765 .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */ 766 767 /* ARK PMD supports all line rates, how do we indicate that here ?? */ 768 dev_info->speed_capa = (ETH_LINK_SPEED_1G | 769 ETH_LINK_SPEED_10G | 770 ETH_LINK_SPEED_25G | 771 ETH_LINK_SPEED_40G | 772 ETH_LINK_SPEED_50G | 773 ETH_LINK_SPEED_100G); 774 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); 775 } 776 777 static int 778 eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 779 { 780 PMD_DEBUG_LOG(DEBUG, "link status = %d\n", 781 dev->data->dev_link.link_status); 782 struct ark_adapter *ark = 783 (struct ark_adapter *)dev->data->dev_private; 784 785 if (ark->user_ext.link_update) { 786 return ark->user_ext.link_update 787 (dev, wait_to_complete, 788 ark->user_data[dev->data->port_id]); 789 } 790 return 0; 791 } 792 793 static int 794 eth_ark_dev_set_link_up(struct rte_eth_dev *dev) 795 { 796 dev->data->dev_link.link_status = 1; 797 struct ark_adapter *ark = 798 (struct ark_adapter *)dev->data->dev_private; 799 800 if (ark->user_ext.dev_set_link_up) 801 return ark->user_ext.dev_set_link_up(dev, 802 ark->user_data[dev->data->port_id]); 803 return 0; 804 } 805 806 static int 807 eth_ark_dev_set_link_down(struct rte_eth_dev *dev) 808 { 809 dev->data->dev_link.link_status = 0; 810 struct ark_adapter *ark = 811 (struct ark_adapter *)dev->data->dev_private; 812 813 if (ark->user_ext.dev_set_link_down) 814 return ark->user_ext.dev_set_link_down(dev, 815 ark->user_data[dev->data->port_id]); 816 return 0; 817 } 818 819 static void 820 eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 821 { 822 uint16_t i; 823 struct ark_adapter *ark = 824 (struct ark_adapter *)dev->data->dev_private; 825 826 stats->ipackets = 0; 827 stats->ibytes = 0; 828 stats->opackets = 0; 829 stats->obytes = 0; 830 stats->imissed = 0; 831 stats->oerrors = 0; 832 833 for (i = 0; i < dev->data->nb_tx_queues; i++) 834 eth_tx_queue_stats_get(dev->data->tx_queues[i], stats); 835 for (i = 0; i < dev->data->nb_rx_queues; i++) 836 eth_rx_queue_stats_get(dev->data->rx_queues[i], stats); 837 if (ark->user_ext.stats_get) 838 ark->user_ext.stats_get(dev, stats, 839 ark->user_data[dev->data->port_id]); 840 } 841 842 static void 843 eth_ark_dev_stats_reset(struct rte_eth_dev *dev) 844 { 845 uint16_t i; 846 struct ark_adapter *ark = 847 (struct ark_adapter *)dev->data->dev_private; 848 849 for (i = 0; i < dev->data->nb_tx_queues; i++) 850 eth_tx_queue_stats_reset(dev->data->tx_queues[i]); 851 for (i = 0; i < dev->data->nb_rx_queues; i++) 852 eth_rx_queue_stats_reset(dev->data->rx_queues[i]); 853 if (ark->user_ext.stats_reset) 854 ark->user_ext.stats_reset(dev, 855 ark->user_data[dev->data->port_id]); 856 } 857 858 static int 859 eth_ark_macaddr_add(struct rte_eth_dev *dev, 860 struct ether_addr *mac_addr, 861 uint32_t index, 862 uint32_t pool) 863 { 864 struct ark_adapter *ark = 865 (struct ark_adapter *)dev->data->dev_private; 866 867 if (ark->user_ext.mac_addr_add) { 868 ark->user_ext.mac_addr_add(dev, 869 mac_addr, 870 index, 871 pool, 872 ark->user_data[dev->data->port_id]); 873 return 0; 874 } 875 return -ENOTSUP; 876 } 877 878 static void 879 eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) 880 { 881 struct ark_adapter *ark = 882 (struct ark_adapter *)dev->data->dev_private; 883 884 if (ark->user_ext.mac_addr_remove) 885 ark->user_ext.mac_addr_remove(dev, index, 886 ark->user_data[dev->data->port_id]); 887 } 888 889 static void 890 eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, 891 struct ether_addr *mac_addr) 892 { 893 struct ark_adapter *ark = 894 (struct ark_adapter *)dev->data->dev_private; 895 896 if (ark->user_ext.mac_addr_set) 897 ark->user_ext.mac_addr_set(dev, mac_addr, 898 ark->user_data[dev->data->port_id]); 899 } 900 901 static int 902 eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size) 903 { 904 struct ark_adapter *ark = 905 (struct ark_adapter *)dev->data->dev_private; 906 907 if (ark->user_ext.set_mtu) 908 return ark->user_ext.set_mtu(dev, size, 909 ark->user_data[dev->data->port_id]); 910 911 return -ENOTSUP; 912 } 913 914 static inline int 915 process_pktdir_arg(const char *key, const char *value, 916 void *extra_args) 917 { 918 PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n", 919 key, value); 920 struct ark_adapter *ark = 921 (struct ark_adapter *)extra_args; 922 923 ark->pkt_dir_v = strtol(value, NULL, 16); 924 PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v); 925 return 0; 926 } 927 928 static inline int 929 process_file_args(const char *key, const char *value, void *extra_args) 930 { 931 PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n", 932 key, value); 933 char *args = (char *)extra_args; 934 935 /* Open the configuration file */ 936 FILE *file = fopen(value, "r"); 937 char line[ARK_MAX_ARG_LEN]; 938 int size = 0; 939 int first = 1; 940 941 if (file == NULL) { 942 PMD_DRV_LOG(ERR, "Unable to open " 943 "config file %s\n", value); 944 return -1; 945 } 946 947 while (fgets(line, sizeof(line), file)) { 948 size += strlen(line); 949 if (size >= ARK_MAX_ARG_LEN) { 950 PMD_DRV_LOG(ERR, "Unable to parse file %s args, " 951 "parameter list is too long\n", value); 952 fclose(file); 953 return -1; 954 } 955 if (first) { 956 strncpy(args, line, ARK_MAX_ARG_LEN); 957 first = 0; 958 } else { 959 strncat(args, line, ARK_MAX_ARG_LEN); 960 } 961 } 962 PMD_FUNC_LOG(DEBUG, "file = %s\n", args); 963 fclose(file); 964 return 0; 965 } 966 967 static int 968 eth_ark_check_args(struct ark_adapter *ark, const char *params) 969 { 970 struct rte_kvargs *kvlist; 971 unsigned int k_idx; 972 struct rte_kvargs_pair *pair = NULL; 973 int ret = -1; 974 975 kvlist = rte_kvargs_parse(params, valid_arguments); 976 if (kvlist == NULL) 977 return 0; 978 979 ark->pkt_gen_args[0] = 0; 980 ark->pkt_chkr_args[0] = 0; 981 982 for (k_idx = 0; k_idx < kvlist->count; k_idx++) { 983 pair = &kvlist->pairs[k_idx]; 984 PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n", 985 pair->key, 986 pair->value); 987 } 988 989 if (rte_kvargs_process(kvlist, 990 ARK_PKTDIR_ARG, 991 &process_pktdir_arg, 992 ark) != 0) { 993 PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG); 994 goto free_kvlist; 995 } 996 997 if (rte_kvargs_process(kvlist, 998 ARK_PKTGEN_ARG, 999 &process_file_args, 1000 ark->pkt_gen_args) != 0) { 1001 PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG); 1002 goto free_kvlist; 1003 } 1004 1005 if (rte_kvargs_process(kvlist, 1006 ARK_PKTCHKR_ARG, 1007 &process_file_args, 1008 ark->pkt_chkr_args) != 0) { 1009 PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG); 1010 goto free_kvlist; 1011 } 1012 1013 PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v); 1014 /* Setup the packet director */ 1015 ark_pktdir_setup(ark->pd, ark->pkt_dir_v); 1016 1017 /* Setup the packet generator */ 1018 if (ark->pkt_gen_args[0]) { 1019 PMD_DRV_LOG(INFO, "Setting up the packet generator\n"); 1020 ark_pktgen_parse(ark->pkt_gen_args); 1021 ark_pktgen_reset(ark->pg); 1022 ark_pktgen_setup(ark->pg); 1023 ark->start_pg = 1; 1024 } 1025 1026 /* Setup the packet checker */ 1027 if (ark->pkt_chkr_args[0]) { 1028 ark_pktchkr_parse(ark->pkt_chkr_args); 1029 ark_pktchkr_setup(ark->pc); 1030 } 1031 1032 ret = 0; 1033 1034 free_kvlist: 1035 rte_kvargs_free(kvlist); 1036 1037 return ret; 1038 } 1039 1040 RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd); 1041 RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic "); 1042 RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map); 1043 RTE_PMD_REGISTER_PARAM_STRING(net_ark, 1044 ARK_PKTGEN_ARG "=<filename> " 1045 ARK_PKTCHKR_ARG "=<filename> " 1046 ARK_PKTDIR_ARG "=<bitmap>"); 1047