/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_config.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;

struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};

static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (mtu < ETHER_MIN_MTU)
		return -EINVAL;
	if (mtu > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;

	fman_if_set_maxfrm(dpaa_intf->fif, mtu);

	return 0;
}

static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN)
			return dpaa_mtu_set(dev,
				dev->data->dev_conf.rxmode.max_rx_pkt_len);
		else
			return -1;
	}
	return 0;
}

static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP
	};

	PMD_INIT_FUNC_TRACE();

	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}

static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	dev->tx_pkt_burst = dpaa_eth_queue_tx;
	fman_if_enable_rx(dpaa_intf->fif);

	return 0;
}

static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(dpaa_intf->fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}

static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}

static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		    char *fw_version,
		    size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}

	ret = fscanf(svr_file, "svr:%x", &svr_ver);
	fclose(svr_file);
	if (ret <= 0) {
		DPAA_PMD_ERR("Unable to read SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}

	ret = snprintf(fw_version, fw_size,
		       "svr:%x-fman-v%x",
		       svr_ver,
		       fman_ip_rev);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs =
		DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G);
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM);
	dev_info->tx_offload_capa =
		(DEV_TX_OFFLOAD_IPV4_CKSUM |
		 DEV_TX_OFFLOAD_UDP_CKSUM |
		 DEV_TX_OFFLOAD_TCP_CKSUM);
}

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;

	PMD_INIT_FUNC_TRACE();

	if (dpaa_intf->fif->mac_type == fman_mac_1g)
		link->link_speed = 1000;
	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
		link->link_speed = 10000;
	else
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);

	link->link_status = dpaa_intf->valid;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = ETH_LINK_AUTONEG;
	return 0;
}

static void dpaa_eth_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_get(dpaa_intf->fif, stats);
}

static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_reset(dpaa_intf->fif);
}

static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	fman_if_stats_get_all(dpaa_intf->fif, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}

static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		struct dpaa_if *dpaa_intf = dev->data->dev_private;

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* pass the number of 64-bit counters, not the byte size */
		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dpaa_intf->fif);
}

static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dpaa_intf->fif);
}

static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dpaa_intf->fif);
}

static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dpaa_intf->fif);
}

static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);

	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to Dataroom Size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
			      dpaa_intf->name, fd_offset,
			      fman_if_get_fdoff(dpaa_intf->fif));
	}
	dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];

	return 0;
}

static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa_if *dpaa_intf =
		dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
	return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int dpaa_link_down(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
	return 0;
}

static int dpaa_link_up(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_start(dev);
	return 0;
}

static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	net_fc = dpaa_intf->fc_conf;

	if (fc_conf->high_water < fc_conf->low_water) {
		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
		return -EINVAL;
	}

	if (fc_conf->mode == RTE_FC_NONE) {
		return 0;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
		   fc_conf->mode == RTE_FC_FULL) {
		fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
					 fc_conf->low_water,
					 dpaa_intf->bp_info->bpid);
		if (fc_conf->pause_time)
			fman_if_set_fc_quanta(dpaa_intf->fif,
					      fc_conf->pause_time);
	}

	/* Save the information in dpaa device */
	net_fc->pause_time = fc_conf->pause_time;
	net_fc->high_water = fc_conf->high_water;
	net_fc->low_water = fc_conf->low_water;
	net_fc->send_xon = fc_conf->send_xon;
	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
	net_fc->mode = fc_conf->mode;
	net_fc->autoneg = fc_conf->autoneg;

	return 0;
}

static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (net_fc) {
		fc_conf->pause_time = net_fc->pause_time;
		fc_conf->high_water = net_fc->high_water;
		fc_conf->low_water = net_fc->low_water;
		fc_conf->send_xon = net_fc->send_xon;
		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
		fc_conf->mode = net_fc->mode;
		fc_conf->autoneg = net_fc->autoneg;
		return 0;
	}
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}

static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr,
		      uint32_t index,
		      __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);

	if (ret)
		RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
			" err = %d", ret);
	return 0;
}

static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
			 uint32_t index)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_clear_mac_addr(dpaa_intf->fif, index);
}

static void
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
}

static struct eth_dev_ops dpaa_devops = {
	.dev_configure = dpaa_eth_dev_configure,
	.dev_start = dpaa_eth_dev_start,
	.dev_stop = dpaa_eth_dev_stop,
	.dev_close = dpaa_eth_dev_close,
	.dev_infos_get = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	.rx_queue_setup = dpaa_eth_rx_queue_setup,
	.tx_queue_setup = dpaa_eth_tx_queue_setup,
	.rx_queue_release = dpaa_eth_rx_queue_release,
	.tx_queue_release = dpaa_eth_tx_queue_release,

	.flow_ctrl_get = dpaa_flow_ctrl_get,
	.flow_ctrl_set = dpaa_flow_ctrl_set,

	.link_update = dpaa_eth_link_update,
	.stats_get = dpaa_eth_stats_get,
	.xstats_get = dpaa_dev_xstats_get,
	.xstats_get_by_id = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
	.xstats_get_names = dpaa_xstats_get_names,
	.xstats_reset = dpaa_eth_stats_reset,
	.stats_reset = dpaa_eth_stats_reset,
	.promiscuous_enable = dpaa_eth_promiscuous_enable,
	.promiscuous_disable = dpaa_eth_promiscuous_disable,
	.allmulticast_enable = dpaa_eth_multicast_enable,
	.allmulticast_disable = dpaa_eth_multicast_disable,
	.mtu_set = dpaa_mtu_set,
	.dev_set_link_down = dpaa_link_down,
	.dev_set_link_up = dpaa_link_up,
	.mac_addr_add = dpaa_dev_add_mac_addr,
	.mac_addr_remove = dpaa_dev_remove_mac_addr,
	.mac_addr_set = dpaa_dev_set_mac_addr,

	.fw_version_get = dpaa_fw_version_get,
};

static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
	struct rte_eth_fc_conf *fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	fc_conf = dpaa_intf->fc_conf;
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}

/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq,
			      uint32_t fqid)
{
	struct qm_mcc_initfq opts;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}

	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA;

	opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_a.stashing.exclusive = 0;
	opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
	opts.fqd.context_a.stashing.data_cl =
		DPAA_IF_RX_DATA_STASH;
	opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;

	/* Enable tail drop */
	opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
	opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
	qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);

	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
	return ret;
}

/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
			      struct fman_if *fman_intf)
{
	struct qm_mcc_initfq opts;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
	if (ret) {
		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.dest.channel = fman_intf->tx_channel_id;
	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_b = 0;
	/* no tx-confirmation */
	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret)
		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
	return ret;
}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
	struct qm_mcc_initfq opts;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}
	/* "map" this Rx FQ to one of the interfaces Tx FQID */
	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
			     fqid, ret);
	return ret;
}
#endif

/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_cores, num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = &dpaa_netcfg->port_cfg[dev_id];
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* save fman_if & cfg in the interface structure */
	dpaa_intf->fif = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;

	/* Initialize Rx FQ's */
	if (getenv("DPAA_NUM_RX_QUEUES"))
		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
	else
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;

	/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
		DPAA_PMD_ERR("Invalid number of RX queues\n");
		return -EINVAL;
	}

	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues)
		return -ENOMEM;
	for (loop = 0; loop < num_rx_fqs; loop++) {
		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
			DPAA_PCD_FQID_MULTIPLIER + loop;
		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
		if (ret)
			return ret;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
	num_cores = rte_lcore_count();
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		num_cores, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues)
		return -ENOMEM;

	for (loop = 0; loop < num_cores; loop++) {
		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
					 fman_intf);
		if (ret)
			return ret;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = num_cores;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");

	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf);

	/* reset bpool list, initialize bpool dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		rte_free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		rte_free(dpaa_intf->rx_queues);
		rte_free(dpaa_intf->tx_queues);
		dpaa_intf->rx_queues = NULL;
		dpaa_intf->tx_queues = NULL;
		dpaa_intf->nb_rx_queues = 0;
		dpaa_intf->nb_tx_queues = 0;
		return -ENOMEM;
	}

	/* copy the primary mac address */
	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);

	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
		dpaa_device->name,
		fman_intf->mac_addr.addr_bytes[0],
		fman_intf->mac_addr.addr_bytes[1],
		fman_intf->mac_addr.addr_bytes[2],
		fman_intf->mac_addr.addr_bytes[3],
		fman_intf->mac_addr.addr_bytes[4],
		fman_intf->mac_addr.addr_bytes[5]);

	/* Disable RX mode */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);

	return 0;
}

static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa_eth_dev_close(dev);

	/* release configuration memory */
	if (dpaa_intf->fc_conf)
		rte_free(dpaa_intf->fc_conf);

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	/* free memory for storing MAC addresses */
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	return 0;
}

static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		return 0;
	}

	if (!is_global_init) {
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_PMD_ERR("QMAN initialization failed: %d",
				     ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_PMD_ERR("BMAN initialization failed: %d",
				     ret);
			return ret;
		}

		is_global_init = 1;
	}

	ret = rte_dpaa_portal_init((void *)1);
	if (ret) {
		DPAA_PMD_ERR("Unable to initialize portal");
		return ret;
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (eth_dev == NULL)
		return -ENOMEM;

	eth_dev->data->dev_private = rte_zmalloc(
					"ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	eth_dev->device->driver = &dpaa_drv->driver;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);