1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2015-2018 Atomic Rules LLC 3 */ 4 5 #include <unistd.h> 6 #include <sys/stat.h> 7 #include <dlfcn.h> 8 9 #include <bus_pci_driver.h> 10 #include <ethdev_pci.h> 11 #include <rte_kvargs.h> 12 13 #include "ark_global.h" 14 #include "ark_logs.h" 15 #include "ark_ethdev_tx.h" 16 #include "ark_ethdev_rx.h" 17 #include "ark_mpu.h" 18 #include "ark_ddm.h" 19 #include "ark_udm.h" 20 #include "ark_rqp.h" 21 #include "ark_pktdir.h" 22 #include "ark_pktgen.h" 23 #include "ark_pktchkr.h" 24 25 /* Internal prototypes */ 26 static int eth_ark_check_args(struct ark_adapter *ark, const char *params); 27 static int eth_ark_dev_init(struct rte_eth_dev *dev); 28 static int ark_config_device(struct rte_eth_dev *dev); 29 static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev); 30 static int eth_ark_dev_configure(struct rte_eth_dev *dev); 31 static int eth_ark_dev_start(struct rte_eth_dev *dev); 32 static int eth_ark_dev_stop(struct rte_eth_dev *dev); 33 static int eth_ark_dev_close(struct rte_eth_dev *dev); 34 static int eth_ark_dev_info_get(struct rte_eth_dev *dev, 35 struct rte_eth_dev_info *dev_info); 36 static int eth_ark_dev_link_update(struct rte_eth_dev *dev, 37 int wait_to_complete); 38 static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev); 39 static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev); 40 static int eth_ark_dev_stats_get(struct rte_eth_dev *dev, 41 struct rte_eth_stats *stats); 42 static int eth_ark_dev_stats_reset(struct rte_eth_dev *dev); 43 static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, 44 struct rte_ether_addr *mac_addr); 45 static int eth_ark_macaddr_add(struct rte_eth_dev *dev, 46 struct rte_ether_addr *mac_addr, 47 uint32_t index, 48 uint32_t pool); 49 static void eth_ark_macaddr_remove(struct rte_eth_dev *dev, 50 uint32_t index); 51 static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size); 52 53 /* 54 * The packet generator is a functional block used to 
generate packet
 * patterns for testing. It is not intended for nominal use.
 */
#define ARK_PKTGEN_ARG "Pkt_gen"

/*
 * The packet checker is a functional block used to verify packet
 * patterns for testing. It is not intended for nominal use.
 */
#define ARK_PKTCHKR_ARG "Pkt_chkr"

/*
 * The packet director is used to select the internal ingress and
 * egress packets paths during testing. It is not intended for
 * nominal use.
 */
#define ARK_PKTDIR_ARG "Pkt_dir"

/* Devinfo configurations */
#define ARK_RX_MAX_QUEUE (4096 * 4)
#define ARK_RX_MIN_QUEUE (512)
#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
#define ARK_RX_MIN_BUFSIZE (1024)

#define ARK_TX_MAX_QUEUE (4096 * 4)
#define ARK_TX_MIN_QUEUE (256)

/* Devargs keys accepted by this PMD; NULL-terminated for rte_kvargs_parse() */
static const char * const valid_arguments[] = {
	ARK_PKTGEN_ARG,
	ARK_PKTCHKR_ARG,
	ARK_PKTDIR_ARG,
	NULL
};

/* Atomic Rules PCI vendor id; one entry per supported Arkville device id */
#define AR_VENDOR_ID 0x1d6c
static const struct rte_pci_id pci_id_ark_map[] = {
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100d)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100e)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100f)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1010)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1017)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1018)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1019)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101a)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101b)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101c)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101e)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101f)},
	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1022)},
	{.vendor_id = 0, /* sentinel */ },
};

/*
 * This structure is used to statically define the capabilities
 * of supported devices.
 * Capabilities:
 *  rqpacing -
 * Some HW variants require that PCIe read-requests be correctly throttled.
 * This is called "rqpacing" and has to do with credit and flow control
 * on certain Arkville implementations.
 */
struct ark_caps {
	bool rqpacing;	/* device requires PCIe read-request pacing */
	bool isvf;	/* device is a virtual function */
};
struct ark_dev_caps {
	uint32_t device_id;
	struct ark_caps caps;
};
#define SET_DEV_CAPS(id, rqp, vf) \
	{id, {.rqpacing = rqp, .isvf = vf} }

/*
 * Per-device-id capability table, terminated by a zero device_id.
 * NOTE(review): device id 0x1022 appears in pci_id_ark_map but has no entry
 * here, so it falls through with rqpacing == false and ark->isvf left at its
 * zero-initialized value — confirm this is intended.
 */
static const struct ark_dev_caps
ark_device_caps[] = {
		     SET_DEV_CAPS(0x100d, true, false),
		     SET_DEV_CAPS(0x100e, true, false),
		     SET_DEV_CAPS(0x100f, true, false),
		     SET_DEV_CAPS(0x1010, false, false),
		     SET_DEV_CAPS(0x1017, true, false),
		     SET_DEV_CAPS(0x1018, true, false),
		     SET_DEV_CAPS(0x1019, true, false),
		     SET_DEV_CAPS(0x101a, true, false),
		     SET_DEV_CAPS(0x101b, true, false),
		     SET_DEV_CAPS(0x101c, true, true),
		     SET_DEV_CAPS(0x101e, false, false),
		     SET_DEV_CAPS(0x101f, false, false),
		     {.device_id = 0,}
};

/*
 * PCI probe: allocate the ethdev with an ark_adapter private area and run
 * device init; on init failure the port is released before returning.
 */
static int
eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));

	if (eth_dev == NULL)
		return -ENOMEM;

	ret = eth_ark_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_release_port(eth_dev);

	return ret;
}

/* PCI remove: generic teardown, per-device cleanup via eth_ark_dev_uninit */
static int
eth_ark_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
}

static struct rte_pci_driver rte_ark_pmd = {
	.id_table = pci_id_ark_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ark_pci_probe,
	.remove = eth_ark_pci_remove,
};

static const struct eth_dev_ops ark_eth_dev_ops = {
	.dev_configure = eth_ark_dev_configure,
	.dev_start = eth_ark_dev_start,
	.dev_stop = eth_ark_dev_stop,
	.dev_close = eth_ark_dev_close,

	.dev_infos_get = eth_ark_dev_info_get,

	.rx_queue_setup = eth_ark_dev_rx_queue_setup,
	.tx_queue_setup = eth_ark_tx_queue_setup,

	.link_update = eth_ark_dev_link_update,
	.dev_set_link_up = eth_ark_dev_set_link_up,
	.dev_set_link_down = eth_ark_dev_set_link_down,

	.rx_queue_start = eth_ark_rx_start_queue,
	.rx_queue_stop = eth_ark_rx_stop_queue,

	.tx_queue_start = eth_ark_tx_queue_start,
	.tx_queue_stop = eth_ark_tx_queue_stop,

	.stats_get = eth_ark_dev_stats_get,
	.stats_reset = eth_ark_dev_stats_reset,

	.mac_addr_add = eth_ark_macaddr_add,
	.mac_addr_remove = eth_ark_macaddr_remove,
	.mac_addr_set = eth_ark_set_default_mac_addr,

	.mtu_set = eth_ark_set_mtu,
};

/*
 * Optionally load a user extension shared object named by the ARK_EXT_PATH
 * environment variable and resolve its well-known entry points into
 * ark->user_ext.  Returns 0 when no extension is configured or on success,
 * -1 if the configured .so cannot be loaded.  Individual dlsym() results are
 * not checked here; callers test each pointer before invoking it.
 */
static int
check_for_ext(struct ark_adapter *ark)
{
	int found = 0;

	/* Get the env */
	const char *dllpath = getenv("ARK_EXT_PATH");

	if (dllpath == NULL) {
		ARK_PMD_LOG(DEBUG, "EXT NO dll path specified\n");
		return 0;
	}
	ARK_PMD_LOG(NOTICE, "EXT found dll path at %s\n", dllpath);

	/* Open and load the .so */
	ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
	if (ark->d_handle == NULL) {
		ARK_PMD_LOG(ERR, "Could not load user extension %s\n",
			    dllpath);
		return -1;
	}
	ARK_PMD_LOG(DEBUG, "SUCCESS: loaded user extension %s\n",
		    dllpath);

	/* Get the entry points */
	ark->user_ext.dev_init =
		(void *(*)(struct rte_eth_dev *, void *, int))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_init");
	ARK_PMD_LOG(DEBUG, "device ext init pointer = %p\n",
		    ark->user_ext.dev_init);
	ark->user_ext.dev_get_port_count =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_get_port_count");
	ark->user_ext.dev_uninit =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_uninit");
	ark->user_ext.dev_configure =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_configure");
	ark->user_ext.dev_start =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_start");
	ark->user_ext.dev_stop =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_stop");
	ark->user_ext.dev_close =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_close");
	ark->user_ext.link_update =
		(int (*)(struct rte_eth_dev *, int, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_link_update");
	ark->user_ext.dev_set_link_up =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_set_link_up");
	ark->user_ext.dev_set_link_down =
		(int (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_dev_set_link_down");
	ark->user_ext.stats_get =
		(int (*)(struct rte_eth_dev *, struct rte_eth_stats *,
			 void *))
		dlsym(ark->d_handle, "rte_pmd_ark_stats_get");
	ark->user_ext.stats_reset =
		(void (*)(struct rte_eth_dev *, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_stats_reset");
	ark->user_ext.mac_addr_add =
		(void (*)(struct rte_eth_dev *, struct rte_ether_addr *,
			  uint32_t, uint32_t, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_mac_addr_add");
	ark->user_ext.mac_addr_remove =
		(void (*)(struct rte_eth_dev *, uint32_t, void *))
		dlsym(ark->d_handle, "rte_pmd_ark_mac_addr_remove");
	ark->user_ext.mac_addr_set =
		(void (*)(struct rte_eth_dev *, struct rte_ether_addr *,
			  void *))
		dlsym(ark->d_handle, "rte_pmd_ark_mac_addr_set");
	ark->user_ext.set_mtu =
		(int (*)(struct rte_eth_dev *, uint16_t,
			 void *))
		dlsym(ark->d_handle, "rte_pmd_ark_set_mtu");
	ark->user_ext.rx_user_meta_hook =
		(rx_user_meta_hook_fn)dlsym(ark->d_handle,
					    "rte_pmd_ark_rx_user_meta_hook");
	ark->user_ext.tx_user_meta_hook =
		(tx_user_meta_hook_fn)dlsym(ark->d_handle,
					    "rte_pmd_ark_tx_user_meta_hook");

	/* 'found' is never set above, so success always returns 0 */
	return found;
}

/*
 * Per-PCI-device initialization: load the optional user extension, map BAR
 * registers into the ark_adapter, sanity-check the hardware, parse devargs,
 * and allocate one ethdev per requested port.
 */
static int
eth_ark_dev_init(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	int ret;
	int port_count = 1;
	int p;
	bool rqpacing = false;

	ark->eth_dev = dev;

	ARK_PMD_LOG(DEBUG, "\n");

	/* Check to see if there is an extension that we need to load */
	ret = check_for_ext(ark);
	if (ret)
		return ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Look up rqpacing/VF capabilities for this device id */
	p = 0;
	while (ark_device_caps[p].device_id != 0) {
		if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
			rqpacing = ark_device_caps[p].caps.rqpacing;
			ark->isvf = ark_device_caps[p].caps.isvf;
			break;
		}
		p++;
	}

	/* Use dummy function until setup */
	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;

	ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
	ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;

	/* Carve BAR0 into the per-block register views */
	ark->sysctrl.v  = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
	ark->mpurx.v  = (void *)&ark->bar0[ARK_MPU_RX_BASE];
	ark->udm.v  = (void *)&ark->bar0[ARK_UDM_BASE];
	ark->mputx.v  = (void *)&ark->bar0[ARK_MPU_TX_BASE];
	ark->ddm.v  = (void *)&ark->bar0[ARK_DDM_BASE];
	ark->cmac.v  = (void *)&ark->bar0[ARK_CMAC_BASE];
	ark->external.v  = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
	ark->pktdir.v  = (void *)&ark->bar0[ARK_PKTDIR_BASE];
	ark->pktgen.v  = (void *)&ark->bar0[ARK_PKTGEN_BASE];
	ark->pktchkr.v  = (void *)&ark->bar0[ARK_PKTCHKR_BASE];

	/* rqpacing block only exists/applies on some devices (see caps) */
	if (rqpacing) {
		ark->rqpacing =
			(struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
	} else {
		ark->rqpacing = NULL;
	}
	ark->started = 0;
	ark->pkt_dir_v = ARK_PKT_DIR_INIT_VAL;

	ARK_PMD_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
		    ark->sysctrl.t32[4],
		    rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
	ARK_PMD_LOG(NOTICE, "Arkville HW Commit_ID: %08x\n",
		    rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));

	/* If HW
sanity test fails, return an error */ 362 if (ark->sysctrl.t32[4] != 0xcafef00d) { 363 ARK_PMD_LOG(ERR, 364 "HW Sanity test has failed, expected constant" 365 " 0x%x, read 0x%x (%s)\n", 366 0xcafef00d, 367 ark->sysctrl.t32[4], __func__); 368 return -1; 369 } 370 if (ark->sysctrl.t32[3] != 0) { 371 if (ark->rqpacing) { 372 if (ark_rqp_lasped(ark->rqpacing)) { 373 ARK_PMD_LOG(ERR, "Arkville Evaluation System - " 374 "Timer has Expired\n"); 375 return -1; 376 } 377 ARK_PMD_LOG(WARNING, "Arkville Evaluation System - " 378 "Timer is Running\n"); 379 } 380 } 381 382 ARK_PMD_LOG(DEBUG, 383 "HW Sanity test has PASSED, expected constant" 384 " 0x%x, read 0x%x (%s)\n", 385 0xcafef00d, ark->sysctrl.t32[4], __func__); 386 387 /* We are a single function multi-port device. */ 388 ret = ark_config_device(dev); 389 if (ret) 390 return -1; 391 392 dev->dev_ops = &ark_eth_dev_ops; 393 dev->rx_queue_count = eth_ark_dev_rx_queue_count; 394 395 dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0); 396 if (!dev->data->mac_addrs) { 397 ARK_PMD_LOG(ERR, 398 "Failed to allocated memory for storing mac address" 399 ); 400 } 401 402 if (ark->user_ext.dev_init) { 403 ark->user_data[dev->data->port_id] = 404 ark->user_ext.dev_init(dev, ark->a_bar, 0); 405 if (!ark->user_data[dev->data->port_id]) { 406 ARK_PMD_LOG(WARNING, 407 "Failed to initialize PMD extension!" 
408 " continuing without it\n"); 409 memset(&ark->user_ext, 0, sizeof(struct ark_user_ext)); 410 dlclose(ark->d_handle); 411 } 412 } 413 414 if (pci_dev->device.devargs) 415 ret = eth_ark_check_args(ark, pci_dev->device.devargs->args); 416 else 417 ARK_PMD_LOG(INFO, "No Device args found\n"); 418 419 if (ret) 420 goto error; 421 /* 422 * We will create additional devices based on the number of requested 423 * ports 424 */ 425 if (ark->user_ext.dev_get_port_count) 426 port_count = 427 ark->user_ext.dev_get_port_count(dev, 428 ark->user_data[dev->data->port_id]); 429 ark->num_ports = port_count; 430 431 for (p = 0; p < port_count; p++) { 432 struct rte_eth_dev *eth_dev; 433 char name[RTE_ETH_NAME_MAX_LEN]; 434 435 snprintf(name, sizeof(name), "arketh%d", 436 dev->data->port_id + p); 437 438 if (p == 0) { 439 /* First port is already allocated by DPDK */ 440 eth_dev = ark->eth_dev; 441 rte_eth_dev_probing_finish(eth_dev); 442 continue; 443 } 444 445 /* reserve an ethdev entry */ 446 eth_dev = rte_eth_dev_allocate(name); 447 if (!eth_dev) { 448 ARK_PMD_LOG(ERR, 449 "Could not allocate eth_dev for port %d\n", 450 p); 451 goto error; 452 } 453 454 eth_dev->device = &pci_dev->device; 455 eth_dev->data->dev_private = ark; 456 eth_dev->dev_ops = ark->eth_dev->dev_ops; 457 eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst; 458 eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst; 459 460 rte_eth_copy_pci_info(eth_dev, pci_dev); 461 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 462 463 eth_dev->data->mac_addrs = rte_zmalloc(name, 464 RTE_ETHER_ADDR_LEN, 0); 465 if (!eth_dev->data->mac_addrs) { 466 ARK_PMD_LOG(ERR, 467 "Memory allocation for MAC failed!" 
				    " Exiting.\n");
			goto error;
		}

		if (ark->user_ext.dev_init) {
			ark->user_data[eth_dev->data->port_id] =
				ark->user_ext.dev_init(dev, ark->a_bar, p);
		}

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;

error:
	/*
	 * NOTE(review): only the primary port's mac_addrs is released here;
	 * ethdevs allocated for secondary ports in the loop above are not
	 * freed on this path — confirm whether that leak is acceptable.
	 */
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
	return -1;
}

/*
 * Initial device configuration when device is opened
 * setup the DDM, and UDM
 * Called once per PCIE device
 */
static int
ark_config_device(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	uint16_t num_q, i;
	struct ark_mpu_t *mpu;

	/*
	 * Make sure that the packet director, generator and checker are in a
	 * known state (PF only; VFs have no access to these blocks)
	 */
	if (!ark->isvf) {
		ark->start_pg = 0;
		ark->pg_running = 0;
		ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
		if (ark->pg == NULL)
			return -1;
		ark_pktgen_reset(ark->pg);
		ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
		if (ark->pc == NULL)
			return -1;
		ark_pktchkr_stop(ark->pc);
		ark->pd = ark_pktdir_init(ark->pktdir.v);
		if (ark->pd == NULL)
			return -1;
	}
	/* Verify HW */
	if (ark_udm_verify(ark->udm.v))
		return -1;
	if (ark_ddm_verify(ark->ddm.v))
		return -1;

	/*
	 * MPU reset: count queues and step through each per-queue MPU
	 * register window.  NOTE(review): the loop bodies only advance the
	 * pointer; no per-queue reset call remains — confirm intended.
	 */
	mpu = ark->mpurx.v;
	num_q = ark_api_num_queues(mpu);
	ark->rx_queues = num_q;
	for (i = 0; i < num_q; i++) {
		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
	}

	mpu = ark->mputx.v;
	num_q = ark_api_num_queues(mpu);
	ark->tx_queues = num_q;
	for (i = 0; i < num_q; i++) {
		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
	}

	if (!ark->isvf && ark->rqpacing)
		ark_rqp_stats_reset(ark->rqpacing);

	return 0;
}

/* Per-device teardown: user-extension uninit, then pktgen/pktchkr (PF only) */
static int
eth_ark_dev_uninit(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (ark->user_ext.dev_uninit)
		ark->user_ext.dev_uninit(dev,
			 ark->user_data[dev->data->port_id]);

	if (!ark->isvf) {
		ark_pktgen_uninit(ark->pg);
		ark_pktchkr_uninit(ark->pc);
	}

	return 0;
}

/* dev_configure op: raise link, then defer to the extension if present */
static int
eth_ark_dev_configure(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;

	eth_ark_dev_set_link_up(dev);
	if (ark->user_ext.dev_configure)
		return ark->user_ext.dev_configure(dev,
			       ark->user_data[dev->data->port_id]);
	return 0;
}

/*
 * dev_start op: start all RX/TX queues, install the real burst functions,
 * optionally kick off the internal packet checker/generator (PF test mode),
 * then notify the user extension.
 */
static int
eth_ark_dev_start(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	int i;

	/* RX Side */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_start_queue(dev, i);

	/* TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_ark_tx_queue_start(dev, i);

	ark->started = 1;
	/* set xmit and receive function */
	dev->rx_pkt_burst = &eth_ark_recv_pkts;
	dev->tx_pkt_burst = &eth_ark_xmit_pkts;

	if (!ark->isvf && ark->start_pg)
		ark_pktchkr_run(ark->pc);

	if (!ark->isvf && ark->start_pg && !ark->pg_running) {
		pthread_t thread;

		/* Delay packet generator start to allow the hardware to be
		 * ready.  This is only used for sanity checking with the
		 * internal generator.
		 */
		char tname[32];
		snprintf(tname, sizeof(tname), "ark-delay-pg-%d",
			 dev->data->port_id);

		if (rte_ctrl_thread_create(&thread, tname, NULL,
					   ark_pktgen_delay_start, ark->pg)) {
			ARK_PMD_LOG(ERR, "Could not create pktgen "
				    "starter thread\n");
			return -1;
		}
		ark->pg_running = 1;
	}

	if (ark->user_ext.dev_start)
		ark->user_ext.dev_start(dev,
			ark->user_data[dev->data->port_id]);

	return 0;
}

/*
 * dev_stop op: stop extension, pktgen, burst functions, and all queues.
 * Idempotent: a second call returns immediately.
 */
static int
eth_ark_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;
	int status;
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->started == 0)
		return 0;
	ark->started = 0;
	dev->data->dev_started = 0;

	/* Stop the extension first */
	if (ark->user_ext.dev_stop)
		ark->user_ext.dev_stop(dev,
		       ark->user_data[dev->data->port_id]);

	/* Stop the packet generator */
	if (!ark->isvf && ark->start_pg && ark->pg_running) {
		ark_pktgen_pause(ark->pg);
		ark->pg_running = 0;
	}

	/* Swap back to no-op burst functions while queues drain */
	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;

	/* Stop RX Side */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_stop_queue(dev, i);

	/* STOP TX Side */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		status = eth_ark_tx_queue_stop(dev, i);
		if (status != 0) {
			uint16_t port = dev->data->port_id;
			ARK_PMD_LOG(ERR,
				    "tx_queue stop anomaly"
				    " port %u, queue %u\n",
				    port, i);
		}
	}

	ark_udm_dump_stats(ark->udm.v, "Post stop");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_ark_rx_dump_queue(dev, i, __func__);

	/* Stop the packet checker if it is running */
	if (!ark->isvf && ark->start_pg) {
		ark_pktchkr_dump_stats(ark->pc);
		ark_pktchkr_stop(ark->pc);
	}

	return 0;
}

/*
 * dev_close op: stop the device, restore the packet director to its
 * power-on state, and release all queue resources (primary process only).
 */
static int
eth_ark_dev_close(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	uint16_t i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (ark->user_ext.dev_close)
		ark->user_ext.dev_close(dev,
		 ark->user_data[dev->data->port_id]);

	eth_ark_dev_stop(dev);

	/*
	 * This should only be called once for the device during shutdown
	 */
	if (ark->rqpacing)
		ark_rqp_dump(ark->rqpacing);

	/* return to power-on state */
	if (ark->pd)
		ark_pktdir_setup(ark->pd, ARK_PKT_DIR_INIT_VAL);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_ark_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = 0;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = 0;
	}

	return 0;
}

/* dev_infos_get op: report static limits and per-port queue counts from HW */
static int
eth_ark_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
	struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
	uint16_t ports = ark->num_ports;

	dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
	dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;

	dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
	dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_RX_MAX_QUEUE,
		.nb_min = ARK_RX_MIN_QUEUE,
		.nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ARK_TX_MAX_QUEUE,
		.nb_min = ARK_TX_MIN_QUEUE,
		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */

	/* ARK PMD supports all line rates, how do we indicate that here ??
	 */
	dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
				RTE_ETH_LINK_SPEED_10G |
				RTE_ETH_LINK_SPEED_25G |
				RTE_ETH_LINK_SPEED_40G |
				RTE_ETH_LINK_SPEED_50G |
				RTE_ETH_LINK_SPEED_100G);

	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;

	return 0;
}

/* link_update op: delegated entirely to the user extension when loaded */
static int
eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	ARK_PMD_LOG(DEBUG, "link status = %d\n",
		    dev->data->dev_link.link_status);
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.link_update) {
		return ark->user_ext.link_update
			(dev, wait_to_complete,
			 ark->user_data[dev->data->port_id]);
	}
	return 0;
}

/* dev_set_link_up op: mark link up locally, then notify the extension */
static int
eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.dev_set_link_up)
		return ark->user_ext.dev_set_link_up(dev,
			     ark->user_data[dev->data->port_id]);
	return 0;
}

/* dev_set_link_down op: mark link down locally, then notify the extension */
static int
eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.dev_set_link_down)
		return ark->user_ext.dev_set_link_down(dev,
		       ark->user_data[dev->data->port_id]);
	return 0;
}

/* stats_get op: sum per-queue counters, then let the extension amend them */
static int
eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;
	struct ark_adapter *ark = dev->data->dev_private;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;
	stats->imissed = 0;
	stats->oerrors = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
	if (ark->user_ext.stats_get)
		return ark->user_ext.stats_get(dev, stats,
			ark->user_data[dev->data->port_id]);
	return 0;
}

/* stats_reset op: clear per-queue counters and the extension's, if any */
static int
eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct ark_adapter *ark = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
	if (ark->user_ext.stats_reset)
		ark->user_ext.stats_reset(dev,
			  ark->user_data[dev->data->port_id]);

	return 0;
}

/* mac_addr_add op: supported only through the user extension */
static int
eth_ark_macaddr_add(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mac_addr,
		    uint32_t index,
		    uint32_t pool)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.mac_addr_add) {
		ark->user_ext.mac_addr_add(dev,
					   mac_addr,
					   index,
					   pool,
			   ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

/* mac_addr_remove op: supported only through the user extension */
static void
eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.mac_addr_remove)
		ark->user_ext.mac_addr_remove(dev, index,
			      ark->user_data[dev->data->port_id]);
}

/* mac_addr_set op: supported only through the user extension */
static int
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct rte_ether_addr *mac_addr)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.mac_addr_set) {
		ark->user_ext.mac_addr_set(dev, mac_addr,
			   ark->user_data[dev->data->port_id]);
		return 0;
	}
	return -ENOTSUP;
}

/* mtu_set op: supported only through the user extension */
static int
eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
{
	struct ark_adapter *ark = dev->data->dev_private;

	if (ark->user_ext.set_mtu)
		return ark->user_ext.set_mtu(dev, size,
			     ark->user_data[dev->data->port_id]);

	return -ENOTSUP;
}

/*
 * rte_kvargs handler for ARK_PKTDIR_ARG: parse the hex bitmap into
 * ark->pkt_dir_v.  NOTE(review): strtol result is not range/error checked.
 */
static inline int
process_pktdir_arg(const char *key, const char *value,
		   void *extra_args)
{
	ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n",
		    key, value);
	struct ark_adapter *ark =
		(struct ark_adapter *)extra_args;

	ark->pkt_dir_v = strtol(value, NULL, 16);
	ARK_PMD_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
	return 0;
}

/*
 * rte_kvargs handler for the pktgen/pktchkr file arguments: read the whole
 * config file named by 'value' into the caller-supplied buffer (extra_args,
 * an ARK_MAX_ARG_LEN char array).  The running 'size' check guarantees the
 * total stays below ARK_MAX_ARG_LEN before any copy, so the buffer cannot
 * overflow in practice.  NOTE(review): strncat's third argument should be
 * the remaining space, not the total buffer size — harmless here only
 * because of the preceding length check.
 */
static inline int
process_file_args(const char *key, const char *value, void *extra_args)
{
	ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n",
		    key, value);
	char *args = (char *)extra_args;

	/* Open the configuration file */
	FILE *file = fopen(value, "r");
	char line[ARK_MAX_ARG_LEN];
	int size = 0;
	int first = 1;

	if (file == NULL) {
		ARK_PMD_LOG(ERR, "Unable to open "
			    "config file %s\n", value);
		return -1;
	}

	while (fgets(line, sizeof(line), file)) {
		size += strlen(line);
		if (size >= ARK_MAX_ARG_LEN) {
			ARK_PMD_LOG(ERR, "Unable to parse file %s args, "
				    "parameter list is too long\n", value);
			fclose(file);
			return -1;
		}
		if (first) {
			strncpy(args, line, ARK_MAX_ARG_LEN);
			first = 0;
		} else {
			strncat(args, line, ARK_MAX_ARG_LEN);
		}
	}
	ARK_PMD_LOG(DEBUG, "file = %s\n", args);
	fclose(file);
	return 0;
}

/*
 * Parse the device arguments (Pkt_dir/Pkt_gen/Pkt_chkr) and, on a PF,
 * program the packet director and optionally arm the internal packet
 * generator/checker for test runs.  Returns 0 on success, -1 on parse error.
 */
static int
eth_ark_check_args(struct ark_adapter *ark, const char *params)
{
	struct rte_kvargs *kvlist;
	unsigned int k_idx;
	struct rte_kvargs_pair *pair = NULL;
	int ret = -1;

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return 0;

	ark->pkt_gen_args[0] = 0;
	ark->pkt_chkr_args[0] = 0;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		ARK_PMD_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
			     pair->key,
			     pair->value);
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTDIR_ARG,
			       &process_pktdir_arg,
			       ark) != 0) {
		ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTGEN_ARG,
			       &process_file_args,
			       ark->pkt_gen_args) != 0) {
		ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
		goto free_kvlist;
	}

	if (rte_kvargs_process(kvlist,
			       ARK_PKTCHKR_ARG,
			       &process_file_args,
			       ark->pkt_chkr_args) != 0) {
		ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
		goto free_kvlist;
	}

	/* VFs cannot touch the director/generator/checker blocks */
	if (ark->isvf) {
		ret = 0;
		goto free_kvlist;
	}
	ARK_PMD_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
	/* Setup the packet director */
	ark_pktdir_setup(ark->pd, ark->pkt_dir_v);

	/* Setup the packet generator */
	if (ark->pkt_gen_args[0]) {
		ARK_PMD_LOG(DEBUG, "Setting up the packet generator\n");
		ark_pktgen_parse(ark->pkt_gen_args);
		ark_pktgen_reset(ark->pg);
		ark_pktgen_setup(ark->pg);
		ark->start_pg = 1;
	}

	/* Setup the packet checker */
	if (ark->pkt_chkr_args[0]) {
		ark_pktchkr_parse(ark->pkt_chkr_args);
		ark_pktchkr_setup(ark->pc);
	}

	ret = 0;

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
RTE_PMD_REGISTER_PARAM_STRING(net_ark,
			      ARK_PKTGEN_ARG "=<filename> "
			      ARK_PKTCHKR_ARG "=<filename> "
			      ARK_PKTDIR_ARG "=<bitmap>");
RTE_LOG_REGISTER_DEFAULT(ark_logtype, NOTICE);