/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Atomic Rules LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <sys/stat.h>
#include <dlfcn.h>

#include <rte_bus_pci.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>

#include "ark_global.h"
#include "ark_logs.h"
#include "ark_ethdev_tx.h"
#include "ark_ethdev_rx.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_udm.h"
#include "ark_rqp.h"
#include "ark_pktdir.h"
#include "ark_pktgen.h"
#include "ark_pktchkr.h"

/* Internal prototypes */
static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
static int eth_ark_dev_init(struct rte_eth_dev *dev);
static int ark_config_device(struct rte_eth_dev *dev);
static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
static int eth_ark_dev_configure(struct rte_eth_dev *dev);
static int eth_ark_dev_start(struct rte_eth_dev *dev);
static void eth_ark_dev_stop(struct rte_eth_dev *dev);
static void eth_ark_dev_close(struct rte_eth_dev *dev);
static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
					struct ether_addr *mac_addr);
static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
			       struct ether_addr *mac_addr,
			       uint32_t index,
			       uint32_t pool);
static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
				   uint32_t index);
static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);

/*
 * The packet generator is a functional block used to generate packet
 * patterns for testing.  It is not intended for nominal use.
 */
#define ARK_PKTGEN_ARG "Pkt_gen"

/*
 * The packet checker is a functional block used to verify packet
 * patterns for testing.  It is not intended for nominal use.
 */
#define ARK_PKTCHKR_ARG "Pkt_chkr"

/*
 * The packet director is used to select the internal ingress and
 * egress packet paths during testing.  It is not intended for
 * nominal use.
 */
#define ARK_PKTDIR_ARG "Pkt_dir"

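/*
 * Illustrative devargs usage (the PCI address and value placeholders are
 * examples only):
 *   -w 0000:01:00.0,Pkt_dir=<bitmap>,Pkt_gen=<pktgen file>,Pkt_chkr=<pktchkr file>
 * The accepted keys are declared in RTE_PMD_REGISTER_PARAM_STRING() at the
 * end of this file; all three select test-only blocks and are not needed
 * for normal operation.
 */
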
/* Devinfo configurations */
#define ARK_RX_MAX_QUEUE (4096 * 4)
#define ARK_RX_MIN_QUEUE (512)
#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
#define ARK_RX_MIN_BUFSIZE (1024)

#define ARK_TX_MAX_QUEUE (4096 * 4)
#define ARK_TX_MIN_QUEUE (256)

static const char * const valid_arguments[] = {
	ARK_PKTGEN_ARG,
	ARK_PKTCHKR_ARG,
	ARK_PKTDIR_ARG,
	NULL
};

static const struct rte_pci_id pci_id_ark_map[] = {
	{RTE_PCI_DEVICE(0x1d6c, 0x100d)},
	{RTE_PCI_DEVICE(0x1d6c, 0x100e)},
	{.vendor_id = 0, /* sentinel */ },
};

static int
eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));

	if (eth_dev == NULL)
		return -ENOMEM;

	ret = eth_ark_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_pci_release(eth_dev);

	return ret;
}

static int
eth_ark_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
}

static struct rte_pci_driver rte_ark_pmd = {
	.id_table = pci_id_ark_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ark_pci_probe,
	.remove = eth_ark_pci_remove,
};

static const struct eth_dev_ops ark_eth_dev_ops = {
	.dev_configure = eth_ark_dev_configure,
	.dev_start = eth_ark_dev_start,
	.dev_stop = eth_ark_dev_stop,
	.dev_close = eth_ark_dev_close,

	.dev_infos_get = eth_ark_dev_info_get,

	.rx_queue_setup = eth_ark_dev_rx_queue_setup,
	.rx_queue_count = eth_ark_dev_rx_queue_count,
	.tx_queue_setup = eth_ark_tx_queue_setup,

	.link_update = eth_ark_dev_link_update,
	.dev_set_link_up = eth_ark_dev_set_link_up,
	.dev_set_link_down = eth_ark_dev_set_link_down,

	.rx_queue_start = eth_ark_rx_start_queue,
	.rx_queue_stop = eth_ark_rx_stop_queue,

	.tx_queue_start = eth_ark_tx_queue_start,
	.tx_queue_stop = eth_ark_tx_queue_stop,

	.stats_get = eth_ark_dev_stats_get,
	.stats_reset = eth_ark_dev_stats_reset,

	.mac_addr_add = eth_ark_macaddr_add,
	.mac_addr_remove = eth_ark_macaddr_remove,
	.mac_addr_set = eth_ark_set_default_mac_addr,

	.mtu_set = eth_ark_set_mtu,
};

static int
check_for_ext(struct ark_adapter *ark)
{
	int found = 0;

	/* Get the env */
	const char *dllpath = getenv("ARK_EXT_PATH");

	if (dllpath == NULL) {
		PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n");
		return 0;
	}
	PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath);

	/* Open and load the .so */
	ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
	if (ark->d_handle == NULL) {
		PMD_DRV_LOG(ERR, "Could not load user extension %s\n",
			    dllpath);
		return -1;
	}
	PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n",
		    dllpath);

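	/*
	 * Each entry point below is optional: dlsym() returns NULL for any
	 * symbol the extension does not export, and every caller checks the
	 * pointer before dispatching to it.
	 */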
	/* Get the entry points */
	ark->user_ext.dev_init =
		(void *(*)(struct rte_eth_dev *, void *, int))
		dlsym(ark->d_handle, "dev_init");
	PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n",
		      ark->user_ext.dev_init);
	ark->user_ext.dev_get_port_count =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_get_port_count");
	ark->user_ext.dev_uninit =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_uninit");
	ark->user_ext.dev_configure =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_configure");
	ark->user_ext.dev_start =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_start");
	ark->user_ext.dev_stop =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_stop");
	ark->user_ext.dev_close =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_close");
	ark->user_ext.link_update =
		(int (*)(struct rte_eth_dev *, int, void *))
		dlsym(ark->d_handle, "link_update");
	ark->user_ext.dev_set_link_up =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_set_link_up");
	ark->user_ext.dev_set_link_down =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "dev_set_link_down");
	ark->user_ext.stats_get =
		(int (*)(struct rte_eth_dev *, struct rte_eth_stats *,
			 void *))
		dlsym(ark->d_handle, "stats_get");
	ark->user_ext.stats_reset =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "stats_reset");
	ark->user_ext.mac_addr_add =
		(void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t,
			  uint32_t, void *))
		dlsym(ark->d_handle, "mac_addr_add");
	ark->user_ext.mac_addr_remove =
		(void (*)(struct rte_eth_dev *, uint32_t, void *))
		dlsym(ark->d_handle, "mac_addr_remove");
	ark->user_ext.mac_addr_set =
		(void (*)(struct rte_eth_dev *, struct ether_addr *,
			  void *))
		dlsym(ark->d_handle, "mac_addr_set");
	ark->user_ext.set_mtu =
		(int (*)(struct rte_eth_dev *, uint16_t,
			 void *))
		dlsym(ark->d_handle, "set_mtu");

	return found;
}

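/*
 * Sketch of a minimal user extension, inferred from the casts in
 * check_for_ext() above (illustrative, not a definitive API): the shared
 * object named by ARK_EXT_PATH exports any subset of the symbols resolved
 * above, for example
 *
 *   void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id);
 *   int dev_get_port_count(struct rte_eth_dev *dev, void *user_data);
 *   void dev_uninit(struct rte_eth_dev *dev, void *user_data);
 *
 * The pointer returned by dev_init() is stored per port and passed back
 * as user_data to every other extension call.
 */
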
static int
eth_ark_dev_init(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	int ret;
	int port_count = 1;
	int p;

	ark->eth_dev = dev;

	PMD_FUNC_LOG(DEBUG, "\n");

	/* Check to see if there is an extension that we need to load */
	ret = check_for_ext(ark);
	if (ret)
		return ret;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	rte_eth_copy_pci_info(dev, pci_dev);

	/* Use dummy function until setup */
	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;

	ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
	ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;

	ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
	ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
	ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
	ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
	ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
	ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
	ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
	ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
	ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
	ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];

	ark->rqpacing =
		(struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
	ark->started = 0;

	PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
		      ark->sysctrl.t32[4],
		      rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
	PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n",
		    rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));

	/* If HW sanity test fails, return an error */
	if (ark->sysctrl.t32[4] != 0xcafef00d) {
		PMD_DRV_LOG(ERR,
			    "HW Sanity test has failed, expected constant"
			    " 0x%x, read 0x%x (%s)\n",
			    0xcafef00d,
			    ark->sysctrl.t32[4], __func__);
		return -1;
	}
	if (ark->sysctrl.t32[3] != 0) {
		if (ark_rqp_lasped(ark->rqpacing)) {
			PMD_DRV_LOG(ERR, "Arkville Evaluation System - "
				    "Timer has Expired\n");
			return -1;
		}
		PMD_DRV_LOG(WARNING, "Arkville Evaluation System - "
			    "Timer is Running\n");
	}

	PMD_DRV_LOG(INFO,
		    "HW Sanity test has PASSED, expected constant"
		    " 0x%x, read 0x%x (%s)\n",
		    0xcafef00d, ark->sysctrl.t32[4], __func__);

	/* We are a single function multi-port device. */
	ret = ark_config_device(dev);
	dev->dev_ops = &ark_eth_dev_ops;

	dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
	if (!dev->data->mac_addrs) {
		PMD_DRV_LOG(ERR,
			    "Failed to allocate memory for storing mac address"
			    );
		goto error;
	}

	if (ark->user_ext.dev_init) {
		ark->user_data[dev->data->port_id] =
			ark->user_ext.dev_init(dev, ark->a_bar, 0);
		if (!ark->user_data[dev->data->port_id]) {
			PMD_DRV_LOG(INFO,
				    "Failed to initialize PMD extension!"
				    " continuing without it\n");
			memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
			dlclose(ark->d_handle);
		}
	}

	if (pci_dev->device.devargs)
		ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
	else
		PMD_DRV_LOG(INFO, "No Device args found\n");

	if (ret)
		goto error;
	/*
	 * We will create additional devices based on the number of requested
	 * ports
	 */
	if (ark->user_ext.dev_get_port_count)
		port_count =
			ark->user_ext.dev_get_port_count(dev,
				ark->user_data[dev->data->port_id]);
	ark->num_ports = port_count;

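	/*
	 * All ports created below share this single ark_adapter as their
	 * dev_private; only the per-port extension state in ark->user_data[]
	 * (indexed by port id) differs between them.
	 */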
	for (p = 0; p < port_count; p++) {
		struct rte_eth_dev *eth_dev;
		char name[RTE_ETH_NAME_MAX_LEN];

		snprintf(name, sizeof(name), "arketh%d",
			 dev->data->port_id + p);

		if (p == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = ark->eth_dev;
			rte_eth_dev_probing_finish(eth_dev);
			continue;
		}

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(ERR,
				    "Could not allocate eth_dev for port %d\n",
				    p);
			goto error;
		}

		eth_dev->device = &pci_dev->device;
		eth_dev->data->dev_private = ark;
		eth_dev->dev_ops = ark->eth_dev->dev_ops;
		eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
		eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(eth_dev, pci_dev);

		eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
		if (!eth_dev->data->mac_addrs) {
			PMD_DRV_LOG(ERR,
				    "Memory allocation for MAC failed!"
				    " Exiting.\n");
			goto error;
		}

		if (ark->user_ext.dev_init) {
			ark->user_data[eth_dev->data->port_id] =
				ark->user_ext.dev_init(dev, ark->a_bar, p);
		}

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;

error:
	if (dev->data->mac_addrs)
		rte_free(dev->data->mac_addrs);
	return -1;
}

/*
 * Initial device configuration when device is opened.
 * Sets up the DDM and UDM.
 * Called once per PCIe device.
 */
static int
ark_config_device(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	uint16_t num_q, i;
	struct ark_mpu_t *mpu;

	/*
	 * Make sure that the packet director, generator and checker are in a
	 * known state
	 */
	ark->start_pg = 0;
	ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
	if (ark->pg == NULL)
		return -1;
	ark_pktgen_reset(ark->pg);
	ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
	if (ark->pc == NULL)
		return -1;
	ark_pktchkr_stop(ark->pc);
	ark->pd = ark_pktdir_init(ark->pktdir.v);
	if (ark->pd == NULL)
		return -1;

	/* Verify HW */
	if (ark_udm_verify(ark->udm.v))
		return -1;
	if (ark_ddm_verify(ark->ddm.v))
		return -1;

	/* UDM */
	if (ark_udm_reset(ark->udm.v)) {
		PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n");
		return -1;
	}
	/* Keep in reset until the MPU are cleared */

	/* MPU reset */
	mpu = ark->mpurx.v;
	num_q = ark_api_num_queues(mpu);
	ark->rx_queues = num_q;
	for (i = 0; i < num_q; i++) {
		ark_mpu_reset(mpu);
		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
	}

	ark_udm_stop(ark->udm.v, 0);
	ark_udm_configure(ark->udm.v,
			  RTE_PKTMBUF_HEADROOM,
			  RTE_MBUF_DEFAULT_DATAROOM,
			  ARK_RX_WRITE_TIME_NS);
	ark_udm_stats_reset(ark->udm.v);
	ark_udm_stop(ark->udm.v, 0);

	/* TX -- DDM */
	if (ark_ddm_stop(ark->ddm.v, 1))
		PMD_DRV_LOG(ERR, "Unable to stop DDM\n");

	mpu = ark->mputx.v;
	num_q = ark_api_num_queues(mpu);
	ark->tx_queues = num_q;
	for (i = 0; i < num_q; i++) {
		ark_mpu_reset(mpu);
		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
	}

	ark_ddm_reset(ark->ddm.v);
	ark_ddm_stats_reset(ark->ddm.v);

	ark_ddm_stop(ark->ddm.v, 0);
	ark_rqp_stats_reset(ark->rqpacing);

	return 0;
}

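/*
 * Per-port teardown.  Only the primary process releases the hardware and
 * driver resources; the extension is notified before the test blocks are
 * uninitialized.
 */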
static int
eth_ark_dev_uninit(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (ark->user_ext.dev_uninit)
		ark->user_ext.dev_uninit(dev,
			 ark->user_data[dev->data->port_id]);

	ark_pktgen_uninit(ark->pg);
	ark_pktchkr_uninit(ark->pc);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	rte_free(dev->data->mac_addrs);
	return 0;
}

static int
eth_ark_dev_configure(struct rte_eth_dev *dev)
{
	PMD_FUNC_LOG(DEBUG, "\n");
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	eth_ark_dev_set_link_up(dev);
	if (ark->user_ext.dev_configure)
		return ark->user_ext.dev_configure(dev,
			   ark->user_data[dev->data->port_id]);
	return 0;
}

static void *
delay_pg_start(void *arg)
{
	struct ark_adapter *ark = (struct ark_adapter *)arg;

	/* This function is used exclusively for regression testing.  We
	 * perform a blind sleep here to ensure that the external test
	 * application has time to set up the test before we generate packets.
	 */
	usleep(100000);
	ark_pktgen_run(ark->pg);
	return NULL;
}

static int
eth_ark_dev_start(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	int i;

	PMD_FUNC_LOG(DEBUG, "\n");

	/* RX Side */
	/* start UDM */
	ark_udm_start(ark->udm.v);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_start_queue(dev, i);

	/* TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_ark_tx_queue_start(dev, i);

	/* start DDM */
	ark_ddm_start(ark->ddm.v);

	ark->started = 1;
	/* set xmit and receive function */
	dev->rx_pkt_burst = &eth_ark_recv_pkts;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts;

	if (ark->start_pg)
		ark_pktchkr_run(ark->pc);

	if (ark->start_pg && (dev->data->port_id == 0)) {
		pthread_t thread;

		/* Delay packet generator start to allow the hardware to be
		 * ready.  This is only used for sanity checking with the
		 * internal generator.
		 */
		if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
			PMD_DRV_LOG(ERR, "Could not create pktgen "
				    "starter thread\n");
			return -1;
		}
	}

	if (ark->user_ext.dev_start)
		ark->user_ext.dev_start(dev,
			ark->user_data[dev->data->port_id]);

	return 0;
}

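/*
 * Stop sequence: notify the user extension first, pause the test packet
 * generator, swap in the no-op burst handlers, then quiesce the TX path
 * (queues, DDM) followed by the RX path (UDM, queues).
 */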
static void
eth_ark_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;
	int status;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct ark_mpu_t *mpu;

	PMD_FUNC_LOG(DEBUG, "\n");

	if (ark->started == 0)
		return;
	ark->started = 0;

	/* Stop the extension first */
	if (ark->user_ext.dev_stop)
		ark->user_ext.dev_stop(dev,
		       ark->user_data[dev->data->port_id]);

	/* Stop the packet generator */
	if (ark->start_pg)
		ark_pktgen_pause(ark->pg);

	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;

	/* STOP TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		status = eth_ark_tx_queue_stop(dev, i);
		if (status != 0) {
			uint16_t port = dev->data->port_id;
			PMD_DRV_LOG(ERR,
				    "tx_queue stop anomaly"
				    " port %u, queue %u\n",
				    port, i);
		}
	}

	/* Stop DDM */
	/* Wait up to 0.1 second.  Each stop attempt is up to 1000 * 10 useconds */
	for (i = 0; i < 10; i++) {
		status = ark_ddm_stop(ark->ddm.v, 1);
		if (status == 0)
			break;
	}
	if (status || i != 0) {
		PMD_DRV_LOG(ERR, "DDM stop anomaly. status:"
			    " %d iter: %u. (%s)\n",
			    status,
			    i,
			    __func__);
		ark_ddm_dump(ark->ddm.v, "Stop anomaly");

		mpu = ark->mputx.v;
		for (i = 0; i < ark->tx_queues; i++) {
			ark_mpu_dump(mpu, "DDM failure dump", i);
			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
		}
	}

	/* STOP RX Side */
	/* Stop UDM; multiple tries attempted */
	for (i = 0; i < 10; i++) {
		status = ark_udm_stop(ark->udm.v, 1);
		if (status == 0)
			break;
	}
	if (status || i != 0) {
		PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
			    status, i, __func__);
		ark_udm_dump(ark->udm.v, "Stop anomaly");

		mpu = ark->mpurx.v;
		for (i = 0; i < ark->rx_queues; i++) {
			ark_mpu_dump(mpu, "UDM Stop anomaly", i);
			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
		}
	}

	ark_udm_dump_stats(ark->udm.v, "Post stop");
	ark_udm_dump_perf(ark->udm.v, "Post stop");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_dump_queue(dev, i, __func__);

	/* Stop the packet checker if it is running */
	if (ark->start_pg) {
		ark_pktchkr_dump_stats(ark->pc);
		ark_pktchkr_stop(ark->pc);
	}
}

static void
eth_ark_dev_close(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	uint16_t i;

	if (ark->user_ext.dev_close)
		ark->user_ext.dev_close(dev,
		 ark->user_data[dev->data->port_id]);

	eth_ark_dev_stop(dev);
	eth_ark_udm_force_close(dev);

	/*
	 * TODO This should only be called once for the device during shutdown
	 */
	ark_rqp_dump(ark->rqpacing);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_ark_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = 0;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = 0;
	}
}

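/*
 * Device capabilities reported to the application.  Per-port queue counts
 * are taken from the MPU hardware via ark_api_num_queues_per_port(), and
 * descriptor ring sizes must be powers of two within the limits below.
 */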
static void
eth_ark_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;
	struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
	struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
	uint16_t ports = ark->num_ports;

	dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
	dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;

	dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
	dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_RX_MAX_QUEUE,
		.nb_min = ARK_RX_MIN_QUEUE,
		.nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_TX_MAX_QUEUE,
		.nb_min = ARK_TX_MIN_QUEUE,
		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */

	/* ARK PMD supports all line rates, how do we indicate that here ?? */
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G |
				ETH_LINK_SPEED_25G |
				ETH_LINK_SPEED_40G |
				ETH_LINK_SPEED_50G |
				ETH_LINK_SPEED_100G);
}

static int
eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
		      dev->data->dev_link.link_status);
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.link_update) {
		return ark->user_ext.link_update
			(dev, wait_to_complete,
			 ark->user_data[dev->data->port_id]);
	}
	return 0;
}

static int
eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.dev_set_link_up)
		return ark->user_ext.dev_set_link_up(dev,
			     ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.dev_set_link_down)
		return ark->user_ext.dev_set_link_down(dev,
			       ark->user_data[dev->data->port_id]);
	return 0;
}

static int
eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;
	stats->imissed = 0;
	stats->oerrors = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
	if (ark->user_ext.stats_get)
		return ark->user_ext.stats_get(dev, stats,
			ark->user_data[dev->data->port_id]);
	return 0;
}

static void
eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
	if (ark->user_ext.stats_reset)
		ark->user_ext.stats_reset(dev,
			  ark->user_data[dev->data->port_id]);
}

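/*
 * MAC address and MTU management is delegated entirely to the user
 * extension; without a loaded extension the add/set/MTU handlers below
 * return -ENOTSUP and the remove handler is a no-op.
 */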
static int
eth_ark_macaddr_add(struct rte_eth_dev *dev,
		    struct ether_addr *mac_addr,
		    uint32_t index,
		    uint32_t pool)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_add) {
		ark->user_ext.mac_addr_add(dev,
					   mac_addr,
					   index,
					   pool,
			   ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

static void
eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_remove)
		ark->user_ext.mac_addr_remove(dev, index,
			      ark->user_data[dev->data->port_id]);
}

static int
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.mac_addr_set) {
		ark->user_ext.mac_addr_set(dev, mac_addr,
			   ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

static int
eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
{
	struct ark_adapter *ark =
		(struct ark_adapter *)dev->data->dev_private;

	if (ark->user_ext.set_mtu)
		return ark->user_ext.set_mtu(dev, size,
			     ark->user_data[dev->data->port_id]);

	return -ENOTSUP;
}

static inline int
process_pktdir_arg(const char *key, const char *value,
		   void *extra_args)
{
	PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
		     key, value);
	struct ark_adapter *ark =
		(struct ark_adapter *)extra_args;

	ark->pkt_dir_v = strtol(value, NULL, 16);
	PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
	return 0;
}

static inline int
process_file_args(const char *key, const char *value, void *extra_args)
{
	PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
		     key, value);
	char *args = (char *)extra_args;

	/* Open the configuration file */
	FILE *file = fopen(value, "r");
	char line[ARK_MAX_ARG_LEN];
	int size = 0;
	int first = 1;

	if (file == NULL) {
		PMD_DRV_LOG(ERR, "Unable to open "
			    "config file %s\n", value);
		return -1;
	}

	while (fgets(line, sizeof(line), file)) {
		size += strlen(line);
		if (size >= ARK_MAX_ARG_LEN) {
			PMD_DRV_LOG(ERR, "Unable to parse file %s args, "
				    "parameter list is too long\n", value);
			fclose(file);
			return -1;
		}
		if (first) {
			strncpy(args, line, ARK_MAX_ARG_LEN);
			first = 0;
		} else {
			strncat(args, line, ARK_MAX_ARG_LEN);
		}
	}
	PMD_FUNC_LOG(DEBUG, "file = %s\n", args);
	fclose(file);
	return 0;
}

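/*
 * Devargs handling: Pkt_dir takes a hex bitmap parsed by
 * process_pktdir_arg(); Pkt_gen and Pkt_chkr name text files whose lines
 * are concatenated (up to ARK_MAX_ARG_LEN bytes) by process_file_args()
 * and then handed to ark_pktgen_parse()/ark_pktchkr_parse().
 */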
static int
eth_ark_check_args(struct ark_adapter *ark, const char *params)
{
	struct rte_kvargs *kvlist;
	unsigned int k_idx;
	struct rte_kvargs_pair *pair = NULL;
	int ret = -1;

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return 0;

	ark->pkt_gen_args[0] = 0;
	ark->pkt_chkr_args[0] = 0;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
			     pair->key,
			     pair->value);
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTDIR_ARG,
			       &process_pktdir_arg,
			       ark) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTGEN_ARG,
			       &process_file_args,
			       ark->pkt_gen_args) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTCHKR_ARG,
			       &process_file_args,
			       ark->pkt_chkr_args) != 0) {
		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
		goto free_kvlist;
	}

	PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
	/* Setup the packet director */
	ark_pktdir_setup(ark->pd, ark->pkt_dir_v);

	/* Setup the packet generator */
	if (ark->pkt_gen_args[0]) {
		PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
		ark_pktgen_parse(ark->pkt_gen_args);
		ark_pktgen_reset(ark->pg);
		ark_pktgen_setup(ark->pg);
		ark->start_pg = 1;
	}

	/* Setup the packet checker */
	if (ark->pkt_chkr_args[0]) {
		ark_pktchkr_parse(ark->pkt_chkr_args);
		ark_pktchkr_setup(ark->pc);
	}

	ret = 0;

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
RTE_PMD_REGISTER_PARAM_STRING(net_ark,
			      ARK_PKTGEN_ARG "=<filename> "
			      ARK_PKTCHKR_ARG "=<filename> "
			      ARK_PKTDIR_ARG "=<bitmap>");