1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2017-2020 NXP 5 * 6 */ 7 /* System headers */ 8 #include <stdio.h> 9 #include <inttypes.h> 10 #include <unistd.h> 11 #include <limits.h> 12 #include <sched.h> 13 #include <signal.h> 14 #include <pthread.h> 15 #include <sys/types.h> 16 #include <sys/syscall.h> 17 18 #include <rte_string_fns.h> 19 #include <rte_byteorder.h> 20 #include <rte_common.h> 21 #include <rte_interrupts.h> 22 #include <rte_log.h> 23 #include <rte_debug.h> 24 #include <rte_pci.h> 25 #include <rte_atomic.h> 26 #include <rte_branch_prediction.h> 27 #include <rte_memory.h> 28 #include <rte_tailq.h> 29 #include <rte_eal.h> 30 #include <rte_alarm.h> 31 #include <rte_ether.h> 32 #include <ethdev_driver.h> 33 #include <rte_malloc.h> 34 #include <rte_ring.h> 35 36 #include <rte_dpaa_bus.h> 37 #include <rte_dpaa_logs.h> 38 #include <dpaa_mempool.h> 39 40 #include <dpaa_ethdev.h> 41 #include <dpaa_rxtx.h> 42 #include <dpaa_flow.h> 43 #include <rte_pmd_dpaa.h> 44 45 #include <fsl_usd.h> 46 #include <fsl_qman.h> 47 #include <fsl_bman.h> 48 #include <fsl_fman.h> 49 #include <process.h> 50 #include <fmlib/fm_ext.h> 51 52 #define CHECK_INTERVAL 100 /* 100ms */ 53 #define MAX_REPEAT_TIME 90 /* 9s (90 * 100ms) in total */ 54 55 /* Supported Rx offloads */ 56 static uint64_t dev_rx_offloads_sup = 57 DEV_RX_OFFLOAD_JUMBO_FRAME | 58 DEV_RX_OFFLOAD_SCATTER; 59 60 /* Rx offloads which cannot be disabled */ 61 static uint64_t dev_rx_offloads_nodis = 62 DEV_RX_OFFLOAD_IPV4_CKSUM | 63 DEV_RX_OFFLOAD_UDP_CKSUM | 64 DEV_RX_OFFLOAD_TCP_CKSUM | 65 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 66 DEV_RX_OFFLOAD_RSS_HASH; 67 68 /* Supported Tx offloads */ 69 static uint64_t dev_tx_offloads_sup = 70 DEV_TX_OFFLOAD_MT_LOCKFREE | 71 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 72 73 /* Tx offloads which cannot be disabled */ 74 static uint64_t dev_tx_offloads_nodis = 75 DEV_TX_OFFLOAD_IPV4_CKSUM | 76 DEV_TX_OFFLOAD_UDP_CKSUM | 77 DEV_TX_OFFLOAD_TCP_CKSUM | 78 DEV_TX_OFFLOAD_SCTP_CKSUM | 79 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 80 DEV_TX_OFFLOAD_MULTI_SEGS; 81 82 /* Keep track of whether QMAN and BMAN have been globally initialized */ 83 static int is_global_init; 84 static int fmc_q = 1; /* Indicates the use of static fmc for distribution */ 85 static int default_q; /* use default queue - FMC is not executed*/ 86 /* At present we only allow up to 4 push mode queues as default - as each of 87 * this queue need dedicated portal and we are short of portals. 
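 * In push (dequeue-to-portal) mode an Rx FQ is scheduled straight to a
 * dedicated software portal, so frames arrive on the portal DQRR without a
 * volatile dequeue command; queues beyond this limit fall back to normal
 * pull mode. As the warning in dpaa_eth_eventq_attach() below notes, the
 * limit can be tuned at run time through the DPAA_PUSH_QUEUES_NUMBER
 * environment variable (0 disables push mode), bounded by
 * DPAA_MAX_PUSH_MODE_QUEUE.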
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4

static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Number of Rx queues placed in push mode so far */


/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

/* Per TX FQ Taildrop in frame count, disabled by default */
static unsigned int td_tx_threshold;

struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};

static struct rte_dpaa_driver rte_dpaa_pmd;

static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused);

static void dpaa_interrupt_handler(void *param);

static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(struct qm_mcc_initfq));
	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_PREFERINCACHE;
	opts->fqd.context_a.stashing.exclusive = 0;
	/* Annotation stashing is a bottleneck on LS1046A, keep it off there */
	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
		opts->fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}

static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;
	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
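	 * In other words the new frame size may exceed a single mbuf's data
	 * room (buffsz) only when DEV_RX_OFFLOAD_SCATTER was enabled at
	 * configure time, and even then it must fit within
	 * DPAA_SGT_MAX_ENTRIES buffers, as checked below.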
175 */ 176 if (dev->data->min_rx_buf_size && 177 !dev->data->scattered_rx && frame_size > buffsz) { 178 DPAA_PMD_ERR("SG not enabled, will not fit in one buffer"); 179 return -EINVAL; 180 } 181 182 /* check <seg size> * <max_seg> >= max_frame */ 183 if (dev->data->min_rx_buf_size && dev->data->scattered_rx && 184 (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) { 185 DPAA_PMD_ERR("Too big to fit for Max SG list %d", 186 buffsz * DPAA_SGT_MAX_ENTRIES); 187 return -EINVAL; 188 } 189 190 if (frame_size > DPAA_ETH_MAX_LEN) 191 dev->data->dev_conf.rxmode.offloads |= 192 DEV_RX_OFFLOAD_JUMBO_FRAME; 193 else 194 dev->data->dev_conf.rxmode.offloads &= 195 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 196 197 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 198 199 fman_if_set_maxfrm(dev->process_private, frame_size); 200 201 return 0; 202 } 203 204 static int 205 dpaa_eth_dev_configure(struct rte_eth_dev *dev) 206 { 207 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 208 uint64_t rx_offloads = eth_conf->rxmode.offloads; 209 uint64_t tx_offloads = eth_conf->txmode.offloads; 210 struct rte_device *rdev = dev->device; 211 struct rte_eth_link *link = &dev->data->dev_link; 212 struct rte_dpaa_device *dpaa_dev; 213 struct fman_if *fif = dev->process_private; 214 struct __fman_if *__fif; 215 struct rte_intr_handle *intr_handle; 216 int speed, duplex; 217 int ret; 218 219 PMD_INIT_FUNC_TRACE(); 220 221 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device); 222 intr_handle = &dpaa_dev->intr_handle; 223 __fif = container_of(fif, struct __fman_if, __if); 224 225 /* Rx offloads which are enabled by default */ 226 if (dev_rx_offloads_nodis & ~rx_offloads) { 227 DPAA_PMD_INFO( 228 "Some of rx offloads enabled by default - requested 0x%" PRIx64 229 " fixed are 0x%" PRIx64, 230 rx_offloads, dev_rx_offloads_nodis); 231 } 232 233 /* Tx offloads which are enabled by default */ 234 if (dev_tx_offloads_nodis & ~tx_offloads) { 235 DPAA_PMD_INFO( 236 "Some of tx offloads enabled by default - requested 0x%" PRIx64 237 " fixed are 0x%" PRIx64, 238 tx_offloads, dev_tx_offloads_nodis); 239 } 240 241 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 242 uint32_t max_len; 243 244 DPAA_PMD_DEBUG("enabling jumbo"); 245 246 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= 247 DPAA_MAX_RX_PKT_LEN) 248 max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; 249 else { 250 DPAA_PMD_INFO("enabling jumbo override conf max len=%d " 251 "supported is %d", 252 dev->data->dev_conf.rxmode.max_rx_pkt_len, 253 DPAA_MAX_RX_PKT_LEN); 254 max_len = DPAA_MAX_RX_PKT_LEN; 255 } 256 257 fman_if_set_maxfrm(dev->process_private, max_len); 258 dev->data->mtu = max_len 259 - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE; 260 } 261 262 if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) { 263 DPAA_PMD_DEBUG("enabling scatter mode"); 264 fman_if_set_sg(dev->process_private, 1); 265 dev->data->scattered_rx = 1; 266 } 267 268 if (!(default_q || fmc_q)) { 269 if (dpaa_fm_config(dev, 270 eth_conf->rx_adv_conf.rss_conf.rss_hf)) { 271 dpaa_write_fm_config_to_file(); 272 DPAA_PMD_ERR("FM port configuration: Failed\n"); 273 return -1; 274 } 275 dpaa_write_fm_config_to_file(); 276 } 277 278 /* if the interrupts were configured on this devices*/ 279 if (intr_handle && intr_handle->fd) { 280 if (dev->data->dev_conf.intr_conf.lsc != 0) 281 rte_intr_callback_register(intr_handle, 282 dpaa_interrupt_handler, 283 (void *)dev); 284 285 ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd); 286 if (ret) { 287 if (dev->data->dev_conf.intr_conf.lsc != 0) { 288 
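				/* Roll back the LSC callback registered just
				 * above, since the interrupt could not be
				 * enabled after all.
				 */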
rte_intr_callback_unregister(intr_handle, 289 dpaa_interrupt_handler, 290 (void *)dev); 291 if (ret == EINVAL) 292 printf("Failed to enable interrupt: Not Supported\n"); 293 else 294 printf("Failed to enable interrupt\n"); 295 } 296 dev->data->dev_conf.intr_conf.lsc = 0; 297 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; 298 } 299 } 300 301 /* Wait for link status to get updated */ 302 if (!link->link_status) 303 sleep(1); 304 305 /* Configure link only if link is UP*/ 306 if (link->link_status) { 307 if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) { 308 /* Start autoneg only if link is not in autoneg mode */ 309 if (!link->link_autoneg) 310 dpaa_restart_link_autoneg(__fif->node_name); 311 } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) { 312 switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) { 313 case ETH_LINK_SPEED_10M_HD: 314 speed = ETH_SPEED_NUM_10M; 315 duplex = ETH_LINK_HALF_DUPLEX; 316 break; 317 case ETH_LINK_SPEED_10M: 318 speed = ETH_SPEED_NUM_10M; 319 duplex = ETH_LINK_FULL_DUPLEX; 320 break; 321 case ETH_LINK_SPEED_100M_HD: 322 speed = ETH_SPEED_NUM_100M; 323 duplex = ETH_LINK_HALF_DUPLEX; 324 break; 325 case ETH_LINK_SPEED_100M: 326 speed = ETH_SPEED_NUM_100M; 327 duplex = ETH_LINK_FULL_DUPLEX; 328 break; 329 case ETH_LINK_SPEED_1G: 330 speed = ETH_SPEED_NUM_1G; 331 duplex = ETH_LINK_FULL_DUPLEX; 332 break; 333 case ETH_LINK_SPEED_2_5G: 334 speed = ETH_SPEED_NUM_2_5G; 335 duplex = ETH_LINK_FULL_DUPLEX; 336 break; 337 case ETH_LINK_SPEED_10G: 338 speed = ETH_SPEED_NUM_10G; 339 duplex = ETH_LINK_FULL_DUPLEX; 340 break; 341 default: 342 speed = ETH_SPEED_NUM_NONE; 343 duplex = ETH_LINK_FULL_DUPLEX; 344 break; 345 } 346 /* Set link speed */ 347 dpaa_update_link_speed(__fif->node_name, speed, duplex); 348 } else { 349 /* Manual autoneg - custom advertisement speed. 
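			 * For example, an application that wants a forced 1G
			 * link sets
			 *   conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_1G;
			 * before rte_eth_dev_configure(). Advertising a custom
			 * subset of speeds (several speed bits without the
			 * FIXED flag) is not supported and only triggers the
			 * message below.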
*/ 350 printf("Custom Advertisement speeds not supported\n"); 351 } 352 } 353 354 return 0; 355 } 356 357 static const uint32_t * 358 dpaa_supported_ptypes_get(struct rte_eth_dev *dev) 359 { 360 static const uint32_t ptypes[] = { 361 RTE_PTYPE_L2_ETHER, 362 RTE_PTYPE_L2_ETHER_VLAN, 363 RTE_PTYPE_L2_ETHER_ARP, 364 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 365 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 366 RTE_PTYPE_L4_ICMP, 367 RTE_PTYPE_L4_TCP, 368 RTE_PTYPE_L4_UDP, 369 RTE_PTYPE_L4_FRAG, 370 RTE_PTYPE_L4_TCP, 371 RTE_PTYPE_L4_UDP, 372 RTE_PTYPE_L4_SCTP 373 }; 374 375 PMD_INIT_FUNC_TRACE(); 376 377 if (dev->rx_pkt_burst == dpaa_eth_queue_rx) 378 return ptypes; 379 return NULL; 380 } 381 382 static void dpaa_interrupt_handler(void *param) 383 { 384 struct rte_eth_dev *dev = param; 385 struct rte_device *rdev = dev->device; 386 struct rte_dpaa_device *dpaa_dev; 387 struct rte_intr_handle *intr_handle; 388 uint64_t buf; 389 int bytes_read; 390 391 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device); 392 intr_handle = &dpaa_dev->intr_handle; 393 394 bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t)); 395 if (bytes_read < 0) 396 DPAA_PMD_ERR("Error reading eventfd\n"); 397 dpaa_eth_link_update(dev, 0); 398 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 399 } 400 401 static int dpaa_eth_dev_start(struct rte_eth_dev *dev) 402 { 403 struct dpaa_if *dpaa_intf = dev->data->dev_private; 404 405 PMD_INIT_FUNC_TRACE(); 406 407 if (!(default_q || fmc_q)) 408 dpaa_write_fm_config_to_file(); 409 410 /* Change tx callback to the real one */ 411 if (dpaa_intf->cgr_tx) 412 dev->tx_pkt_burst = dpaa_eth_queue_tx_slow; 413 else 414 dev->tx_pkt_burst = dpaa_eth_queue_tx; 415 416 fman_if_enable_rx(dev->process_private); 417 418 return 0; 419 } 420 421 static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) 422 { 423 struct fman_if *fif = dev->process_private; 424 425 PMD_INIT_FUNC_TRACE(); 426 dev->data->dev_started = 0; 427 428 if (!fif->is_shared_mac) 429 fman_if_disable_rx(fif); 430 dev->tx_pkt_burst = dpaa_eth_tx_drop_all; 431 432 return 0; 433 } 434 435 static int dpaa_eth_dev_close(struct rte_eth_dev *dev) 436 { 437 struct fman_if *fif = dev->process_private; 438 struct __fman_if *__fif; 439 struct rte_device *rdev = dev->device; 440 struct rte_dpaa_device *dpaa_dev; 441 struct rte_intr_handle *intr_handle; 442 struct rte_eth_link *link = &dev->data->dev_link; 443 struct dpaa_if *dpaa_intf = dev->data->dev_private; 444 int loop; 445 int ret; 446 447 PMD_INIT_FUNC_TRACE(); 448 449 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 450 return 0; 451 452 if (!dpaa_intf) { 453 DPAA_PMD_WARN("Already closed or not started"); 454 return -1; 455 } 456 457 /* DPAA FM deconfig */ 458 if (!(default_q || fmc_q)) { 459 if (dpaa_fm_deconfig(dpaa_intf, dev->process_private)) 460 DPAA_PMD_WARN("DPAA FM deconfig failed\n"); 461 } 462 463 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device); 464 intr_handle = &dpaa_dev->intr_handle; 465 __fif = container_of(fif, struct __fman_if, __if); 466 467 ret = dpaa_eth_dev_stop(dev); 468 469 /* Reset link to autoneg */ 470 if (link->link_status && !link->link_autoneg) 471 dpaa_restart_link_autoneg(__fif->node_name); 472 473 if (intr_handle && intr_handle->fd && 474 dev->data->dev_conf.intr_conf.lsc != 0) { 475 dpaa_intr_disable(__fif->node_name); 476 rte_intr_callback_unregister(intr_handle, 477 dpaa_interrupt_handler, 478 (void *)dev); 479 } 480 481 /* release configuration memory */ 482 if (dpaa_intf->fc_conf) 483 rte_free(dpaa_intf->fc_conf); 484 485 /* Release RX 
congestion Groups */ 486 if (dpaa_intf->cgr_rx) { 487 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++) 488 qman_delete_cgr(&dpaa_intf->cgr_rx[loop]); 489 } 490 491 rte_free(dpaa_intf->cgr_rx); 492 dpaa_intf->cgr_rx = NULL; 493 /* Release TX congestion Groups */ 494 if (dpaa_intf->cgr_tx) { 495 for (loop = 0; loop < MAX_DPAA_CORES; loop++) 496 qman_delete_cgr(&dpaa_intf->cgr_tx[loop]); 497 rte_free(dpaa_intf->cgr_tx); 498 dpaa_intf->cgr_tx = NULL; 499 } 500 501 rte_free(dpaa_intf->rx_queues); 502 dpaa_intf->rx_queues = NULL; 503 504 rte_free(dpaa_intf->tx_queues); 505 dpaa_intf->tx_queues = NULL; 506 507 return ret; 508 } 509 510 static int 511 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused, 512 char *fw_version, 513 size_t fw_size) 514 { 515 int ret; 516 FILE *svr_file = NULL; 517 unsigned int svr_ver = 0; 518 519 PMD_INIT_FUNC_TRACE(); 520 521 svr_file = fopen(DPAA_SOC_ID_FILE, "r"); 522 if (!svr_file) { 523 DPAA_PMD_ERR("Unable to open SoC device"); 524 return -ENOTSUP; /* Not supported on this infra */ 525 } 526 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) 527 dpaa_svr_family = svr_ver & SVR_MASK; 528 else 529 DPAA_PMD_ERR("Unable to read SoC device"); 530 531 fclose(svr_file); 532 533 ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x", 534 svr_ver, fman_ip_rev); 535 if (ret < 0) 536 return -EINVAL; 537 538 ret += 1; /* add the size of '\0' */ 539 if (fw_size < (size_t)ret) 540 return ret; 541 else 542 return 0; 543 } 544 545 static int dpaa_eth_dev_info(struct rte_eth_dev *dev, 546 struct rte_eth_dev_info *dev_info) 547 { 548 struct dpaa_if *dpaa_intf = dev->data->dev_private; 549 struct fman_if *fif = dev->process_private; 550 551 DPAA_PMD_DEBUG(": %s", dpaa_intf->name); 552 553 dev_info->max_rx_queues = dpaa_intf->nb_rx_queues; 554 dev_info->max_tx_queues = dpaa_intf->nb_tx_queues; 555 dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; 556 dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER; 557 dev_info->max_hash_mac_addrs = 0; 558 dev_info->max_vfs = 0; 559 dev_info->max_vmdq_pools = ETH_16_POOLS; 560 dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL; 561 562 if (fif->mac_type == fman_mac_1g) { 563 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD 564 | ETH_LINK_SPEED_10M 565 | ETH_LINK_SPEED_100M_HD 566 | ETH_LINK_SPEED_100M 567 | ETH_LINK_SPEED_1G; 568 } else if (fif->mac_type == fman_mac_2_5g) { 569 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD 570 | ETH_LINK_SPEED_10M 571 | ETH_LINK_SPEED_100M_HD 572 | ETH_LINK_SPEED_100M 573 | ETH_LINK_SPEED_1G 574 | ETH_LINK_SPEED_2_5G; 575 } else if (fif->mac_type == fman_mac_10g) { 576 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD 577 | ETH_LINK_SPEED_10M 578 | ETH_LINK_SPEED_100M_HD 579 | ETH_LINK_SPEED_100M 580 | ETH_LINK_SPEED_1G 581 | ETH_LINK_SPEED_2_5G 582 | ETH_LINK_SPEED_10G; 583 } else { 584 DPAA_PMD_ERR("invalid link_speed: %s, %d", 585 dpaa_intf->name, fif->mac_type); 586 return -EINVAL; 587 } 588 589 dev_info->rx_offload_capa = dev_rx_offloads_sup | 590 dev_rx_offloads_nodis; 591 dev_info->tx_offload_capa = dev_tx_offloads_sup | 592 dev_tx_offloads_nodis; 593 dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE; 594 dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE; 595 dev_info->default_rxportconf.nb_queues = 1; 596 dev_info->default_txportconf.nb_queues = 1; 597 dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH; 598 dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH; 599 600 return 0; 601 } 602 603 static int 604 dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev, 605 
__rte_unused uint16_t queue_id, 606 struct rte_eth_burst_mode *mode) 607 { 608 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 609 int ret = -EINVAL; 610 unsigned int i; 611 const struct burst_info { 612 uint64_t flags; 613 const char *output; 614 } rx_offload_map[] = { 615 {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"}, 616 {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}, 617 {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"}, 618 {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"}, 619 {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"}, 620 {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, 621 {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"} 622 }; 623 624 /* Update Rx offload info */ 625 for (i = 0; i < RTE_DIM(rx_offload_map); i++) { 626 if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) { 627 snprintf(mode->info, sizeof(mode->info), "%s", 628 rx_offload_map[i].output); 629 ret = 0; 630 break; 631 } 632 } 633 return ret; 634 } 635 636 static int 637 dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev, 638 __rte_unused uint16_t queue_id, 639 struct rte_eth_burst_mode *mode) 640 { 641 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 642 int ret = -EINVAL; 643 unsigned int i; 644 const struct burst_info { 645 uint64_t flags; 646 const char *output; 647 } tx_offload_map[] = { 648 {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"}, 649 {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"}, 650 {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"}, 651 {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"}, 652 {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"}, 653 {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"}, 654 {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, 655 {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"} 656 }; 657 658 /* Update Tx offload info */ 659 for (i = 0; i < RTE_DIM(tx_offload_map); i++) { 660 if (eth_conf->txmode.offloads & tx_offload_map[i].flags) { 661 snprintf(mode->info, sizeof(mode->info), "%s", 662 tx_offload_map[i].output); 663 ret = 0; 664 break; 665 } 666 } 667 return ret; 668 } 669 670 static int dpaa_eth_link_update(struct rte_eth_dev *dev, 671 int wait_to_complete) 672 { 673 struct dpaa_if *dpaa_intf = dev->data->dev_private; 674 struct rte_eth_link *link = &dev->data->dev_link; 675 struct fman_if *fif = dev->process_private; 676 struct __fman_if *__fif = container_of(fif, struct __fman_if, __if); 677 int ret, ioctl_version; 678 uint8_t count; 679 680 PMD_INIT_FUNC_TRACE(); 681 682 ioctl_version = dpaa_get_ioctl_version_number(); 683 684 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { 685 for (count = 0; count <= MAX_REPEAT_TIME; count++) { 686 ret = dpaa_get_link_status(__fif->node_name, link); 687 if (ret) 688 return ret; 689 if (link->link_status == ETH_LINK_DOWN && 690 wait_to_complete) 691 rte_delay_ms(CHECK_INTERVAL); 692 else 693 break; 694 } 695 } else { 696 link->link_status = dpaa_intf->valid; 697 } 698 699 if (ioctl_version < 2) { 700 link->link_duplex = ETH_LINK_FULL_DUPLEX; 701 link->link_autoneg = ETH_LINK_AUTONEG; 702 703 if (fif->mac_type == fman_mac_1g) 704 link->link_speed = ETH_SPEED_NUM_1G; 705 else if (fif->mac_type == fman_mac_2_5g) 706 link->link_speed = ETH_SPEED_NUM_2_5G; 707 else if (fif->mac_type == fman_mac_10g) 708 link->link_speed = ETH_SPEED_NUM_10G; 709 else 710 DPAA_PMD_ERR("invalid link_speed: %s, %d", 711 dpaa_intf->name, fif->mac_type); 712 } 713 714 DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, 715 link->link_status ? 
"Up" : "Down"); 716 return 0; 717 } 718 719 static int dpaa_eth_stats_get(struct rte_eth_dev *dev, 720 struct rte_eth_stats *stats) 721 { 722 PMD_INIT_FUNC_TRACE(); 723 724 fman_if_stats_get(dev->process_private, stats); 725 return 0; 726 } 727 728 static int dpaa_eth_stats_reset(struct rte_eth_dev *dev) 729 { 730 PMD_INIT_FUNC_TRACE(); 731 732 fman_if_stats_reset(dev->process_private); 733 734 return 0; 735 } 736 737 static int 738 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 739 unsigned int n) 740 { 741 unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); 742 uint64_t values[sizeof(struct dpaa_if_stats) / 8]; 743 744 if (n < num) 745 return num; 746 747 if (xstats == NULL) 748 return 0; 749 750 fman_if_stats_get_all(dev->process_private, values, 751 sizeof(struct dpaa_if_stats) / 8); 752 753 for (i = 0; i < num; i++) { 754 xstats[i].id = i; 755 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8]; 756 } 757 return i; 758 } 759 760 static int 761 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 762 struct rte_eth_xstat_name *xstats_names, 763 unsigned int limit) 764 { 765 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 766 767 if (limit < stat_cnt) 768 return stat_cnt; 769 770 if (xstats_names != NULL) 771 for (i = 0; i < stat_cnt; i++) 772 strlcpy(xstats_names[i].name, 773 dpaa_xstats_strings[i].name, 774 sizeof(xstats_names[i].name)); 775 776 return stat_cnt; 777 } 778 779 static int 780 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 781 uint64_t *values, unsigned int n) 782 { 783 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 784 uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8]; 785 786 if (!ids) { 787 if (n < stat_cnt) 788 return stat_cnt; 789 790 if (!values) 791 return 0; 792 793 fman_if_stats_get_all(dev->process_private, values_copy, 794 sizeof(struct dpaa_if_stats) / 8); 795 796 for (i = 0; i < stat_cnt; i++) 797 values[i] = 798 values_copy[dpaa_xstats_strings[i].offset / 8]; 799 800 return stat_cnt; 801 } 802 803 dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 804 805 for (i = 0; i < n; i++) { 806 if (ids[i] >= stat_cnt) { 807 DPAA_PMD_ERR("id value isn't valid"); 808 return -1; 809 } 810 values[i] = values_copy[ids[i]]; 811 } 812 return n; 813 } 814 815 static int 816 dpaa_xstats_get_names_by_id( 817 struct rte_eth_dev *dev, 818 const uint64_t *ids, 819 struct rte_eth_xstat_name *xstats_names, 820 unsigned int limit) 821 { 822 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 823 struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 824 825 if (!ids) 826 return dpaa_xstats_get_names(dev, xstats_names, limit); 827 828 dpaa_xstats_get_names(dev, xstats_names_copy, limit); 829 830 for (i = 0; i < limit; i++) { 831 if (ids[i] >= stat_cnt) { 832 DPAA_PMD_ERR("id value isn't valid"); 833 return -1; 834 } 835 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 836 } 837 return limit; 838 } 839 840 static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev) 841 { 842 PMD_INIT_FUNC_TRACE(); 843 844 fman_if_promiscuous_enable(dev->process_private); 845 846 return 0; 847 } 848 849 static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev) 850 { 851 PMD_INIT_FUNC_TRACE(); 852 853 fman_if_promiscuous_disable(dev->process_private); 854 855 return 0; 856 } 857 858 static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev) 859 { 860 PMD_INIT_FUNC_TRACE(); 861 862 fman_if_set_mcast_filter_table(dev->process_private); 863 864 return 0; 865 } 866 867 static int 
dpaa_eth_multicast_disable(struct rte_eth_dev *dev) 868 { 869 PMD_INIT_FUNC_TRACE(); 870 871 fman_if_reset_mcast_filter_table(dev->process_private); 872 873 return 0; 874 } 875 876 static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev) 877 { 878 struct dpaa_if *dpaa_intf = dev->data->dev_private; 879 struct fman_if_ic_params icp; 880 uint32_t fd_offset; 881 uint32_t bp_size; 882 883 memset(&icp, 0, sizeof(icp)); 884 /* set ICEOF for to the default value , which is 0*/ 885 icp.iciof = DEFAULT_ICIOF; 886 icp.iceof = DEFAULT_RX_ICEOF; 887 icp.icsz = DEFAULT_ICSZ; 888 fman_if_set_ic_params(dev->process_private, &icp); 889 890 fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE; 891 fman_if_set_fdoff(dev->process_private, fd_offset); 892 893 /* Buffer pool size should be equal to Dataroom Size*/ 894 bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp); 895 896 fman_if_set_bp(dev->process_private, 897 dpaa_intf->bp_info->mp->size, 898 dpaa_intf->bp_info->bpid, bp_size); 899 } 900 901 static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev, 902 int8_t vsp_id, uint32_t bpid) 903 { 904 struct dpaa_if *dpaa_intf = dev->data->dev_private; 905 struct fman_if *fif = dev->process_private; 906 907 if (fif->num_profiles) { 908 if (vsp_id < 0) 909 vsp_id = fif->base_profile_id; 910 } else { 911 if (vsp_id < 0) 912 vsp_id = 0; 913 } 914 915 if (dpaa_intf->vsp_bpid[vsp_id] && 916 bpid != dpaa_intf->vsp_bpid[vsp_id]) { 917 DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP"); 918 919 return -1; 920 } 921 922 return 0; 923 } 924 925 static 926 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 927 uint16_t nb_desc, 928 unsigned int socket_id __rte_unused, 929 const struct rte_eth_rxconf *rx_conf, 930 struct rte_mempool *mp) 931 { 932 struct dpaa_if *dpaa_intf = dev->data->dev_private; 933 struct fman_if *fif = dev->process_private; 934 struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx]; 935 struct qm_mcc_initfq opts = {0}; 936 u32 flags = 0; 937 int ret; 938 u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 939 940 PMD_INIT_FUNC_TRACE(); 941 942 if (queue_idx >= dev->data->nb_rx_queues) { 943 rte_errno = EOVERFLOW; 944 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", 945 (void *)dev, queue_idx, dev->data->nb_rx_queues); 946 return -rte_errno; 947 } 948 949 /* Rx deferred start is not supported */ 950 if (rx_conf->rx_deferred_start) { 951 DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev); 952 return -EINVAL; 953 } 954 rxq->nb_desc = UINT16_MAX; 955 rxq->offloads = rx_conf->offloads; 956 957 DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)", 958 queue_idx, rxq->fqid); 959 960 if (!fif->num_profiles) { 961 if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp && 962 dpaa_intf->bp_info->mp != mp) { 963 DPAA_PMD_WARN("Multiple pools on same interface not" 964 " supported"); 965 return -EINVAL; 966 } 967 } else { 968 if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id, 969 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) { 970 return -EINVAL; 971 } 972 } 973 974 if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp && 975 dpaa_intf->bp_info->mp != mp) { 976 DPAA_PMD_WARN("Multiple pools on same interface not supported"); 977 return -EINVAL; 978 } 979 980 /* Max packet can fit in single buffer */ 981 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) { 982 ; 983 } else if (dev->data->dev_conf.rxmode.offloads & 984 DEV_RX_OFFLOAD_SCATTER) { 985 if (dev->data->dev_conf.rxmode.max_rx_pkt_len > 986 buffsz * 
DPAA_SGT_MAX_ENTRIES) { 987 DPAA_PMD_ERR("max RxPkt size %d too big to fit " 988 "MaxSGlist %d", 989 dev->data->dev_conf.rxmode.max_rx_pkt_len, 990 buffsz * DPAA_SGT_MAX_ENTRIES); 991 rte_errno = EOVERFLOW; 992 return -rte_errno; 993 } 994 } else { 995 DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is" 996 " larger than a single mbuf (%u) and scattered" 997 " mode has not been requested", 998 dev->data->dev_conf.rxmode.max_rx_pkt_len, 999 buffsz - RTE_PKTMBUF_HEADROOM); 1000 } 1001 1002 dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 1003 1004 /* For shared interface, it's done in kernel, skip.*/ 1005 if (!fif->is_shared_mac) 1006 dpaa_fman_if_pool_setup(dev); 1007 1008 if (fif->num_profiles) { 1009 int8_t vsp_id = rxq->vsp_id; 1010 1011 if (vsp_id >= 0) { 1012 ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id, 1013 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid, 1014 fif); 1015 if (ret) { 1016 DPAA_PMD_ERR("dpaa_port_vsp_update failed"); 1017 return ret; 1018 } 1019 } else { 1020 DPAA_PMD_INFO("Base profile is associated to" 1021 " RXQ fqid:%d\r\n", rxq->fqid); 1022 if (fif->is_shared_mac) { 1023 DPAA_PMD_ERR("Fatal: Base profile is associated" 1024 " to shared interface on DPDK."); 1025 return -EINVAL; 1026 } 1027 dpaa_intf->vsp_bpid[fif->base_profile_id] = 1028 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid; 1029 } 1030 } else { 1031 dpaa_intf->vsp_bpid[0] = 1032 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid; 1033 } 1034 1035 dpaa_intf->valid = 1; 1036 DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name, 1037 fman_if_get_sg_enable(fif), 1038 dev->data->dev_conf.rxmode.max_rx_pkt_len); 1039 /* checking if push mode only, no error check for now */ 1040 if (!rxq->is_static && 1041 dpaa_push_mode_max_queue > dpaa_push_queue_idx) { 1042 struct qman_portal *qp; 1043 int q_fd; 1044 1045 dpaa_push_queue_idx++; 1046 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; 1047 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | 1048 QM_FQCTRL_CTXASTASHING | 1049 QM_FQCTRL_PREFERINCACHE; 1050 opts.fqd.context_a.stashing.exclusive = 0; 1051 /* In muticore scenario stashing becomes a bottleneck on LS1046. 1052 * So do not enable stashing in this case 1053 */ 1054 if (dpaa_svr_family != SVR_LS1046A_FAMILY) 1055 opts.fqd.context_a.stashing.annotation_cl = 1056 DPAA_IF_RX_ANNOTATION_STASH; 1057 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; 1058 opts.fqd.context_a.stashing.context_cl = 1059 DPAA_IF_RX_CONTEXT_STASH; 1060 1061 /*Create a channel and associate given queue with the channel*/ 1062 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0); 1063 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; 1064 opts.fqd.dest.channel = rxq->ch_id; 1065 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; 1066 flags = QMAN_INITFQ_FLAG_SCHED; 1067 1068 /* Configure tail drop */ 1069 if (dpaa_intf->cgr_rx) { 1070 opts.we_mask |= QM_INITFQ_WE_CGID; 1071 opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid; 1072 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 1073 } 1074 ret = qman_init_fq(rxq, flags, &opts); 1075 if (ret) { 1076 DPAA_PMD_ERR("Channel/Q association failed. 
fqid 0x%x " 1077 "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); 1078 return ret; 1079 } 1080 if (dpaa_svr_family == SVR_LS1043A_FAMILY) { 1081 rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch; 1082 } else { 1083 rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; 1084 rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; 1085 } 1086 1087 rxq->is_static = true; 1088 1089 /* Allocate qman specific portals */ 1090 qp = fsl_qman_fq_portal_create(&q_fd); 1091 if (!qp) { 1092 DPAA_PMD_ERR("Unable to alloc fq portal"); 1093 return -1; 1094 } 1095 rxq->qp = qp; 1096 1097 /* Set up the device interrupt handler */ 1098 if (!dev->intr_handle) { 1099 struct rte_dpaa_device *dpaa_dev; 1100 struct rte_device *rdev = dev->device; 1101 1102 dpaa_dev = container_of(rdev, struct rte_dpaa_device, 1103 device); 1104 dev->intr_handle = &dpaa_dev->intr_handle; 1105 dev->intr_handle->intr_vec = rte_zmalloc(NULL, 1106 dpaa_push_mode_max_queue, 0); 1107 if (!dev->intr_handle->intr_vec) { 1108 DPAA_PMD_ERR("intr_vec alloc failed"); 1109 return -ENOMEM; 1110 } 1111 dev->intr_handle->nb_efd = dpaa_push_mode_max_queue; 1112 dev->intr_handle->max_intr = dpaa_push_mode_max_queue; 1113 } 1114 1115 dev->intr_handle->type = RTE_INTR_HANDLE_EXT; 1116 dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1; 1117 dev->intr_handle->efds[queue_idx] = q_fd; 1118 rxq->q_fd = q_fd; 1119 } 1120 rxq->bp_array = rte_dpaa_bpid_info; 1121 dev->data->rx_queues[queue_idx] = rxq; 1122 1123 /* configure the CGR size as per the desc size */ 1124 if (dpaa_intf->cgr_rx) { 1125 struct qm_mcc_initcgr cgr_opts = {0}; 1126 1127 rxq->nb_desc = nb_desc; 1128 /* Enable tail drop with cgr on this queue */ 1129 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0); 1130 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts); 1131 if (ret) { 1132 DPAA_PMD_WARN( 1133 "rx taildrop modify fail on fqid %d (ret=%d)", 1134 rxq->fqid, ret); 1135 } 1136 } 1137 /* Enable main queue to receive error packets also by default */ 1138 fman_if_set_err_fqid(fif, rxq->fqid); 1139 return 0; 1140 } 1141 1142 int 1143 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, 1144 int eth_rx_queue_id, 1145 u16 ch_id, 1146 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 1147 { 1148 int ret; 1149 u32 flags = 0; 1150 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1151 struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; 1152 struct qm_mcc_initfq opts = {0}; 1153 1154 if (dpaa_push_mode_max_queue) 1155 DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n" 1156 "PUSH mode already enabled for first %d queues.\n" 1157 "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n", 1158 dpaa_push_mode_max_queue); 1159 1160 dpaa_poll_queue_default_config(&opts); 1161 1162 switch (queue_conf->ev.sched_type) { 1163 case RTE_SCHED_TYPE_ATOMIC: 1164 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 1165 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 1166 * configuration with HOLD_ACTIVE setting 1167 */ 1168 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 1169 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic; 1170 break; 1171 case RTE_SCHED_TYPE_ORDERED: 1172 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n"); 1173 return -1; 1174 default: 1175 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 1176 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel; 1177 break; 1178 } 1179 1180 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; 1181 opts.fqd.dest.channel = ch_id; 1182 opts.fqd.dest.wq = queue_conf->ev.priority; 1183 1184 if (dpaa_intf->cgr_rx) { 1185 opts.we_mask |= QM_INITFQ_WE_CGID; 1186 opts.fqd.cgid = 
dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; 1187 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 1188 } 1189 1190 flags = QMAN_INITFQ_FLAG_SCHED; 1191 1192 ret = qman_init_fq(rxq, flags, &opts); 1193 if (ret) { 1194 DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x " 1195 "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); 1196 return ret; 1197 } 1198 1199 /* copy configuration which needs to be filled during dequeue */ 1200 memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event)); 1201 dev->data->rx_queues[eth_rx_queue_id] = rxq; 1202 1203 return ret; 1204 } 1205 1206 int 1207 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, 1208 int eth_rx_queue_id) 1209 { 1210 struct qm_mcc_initfq opts; 1211 int ret; 1212 u32 flags = 0; 1213 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1214 struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; 1215 1216 dpaa_poll_queue_default_config(&opts); 1217 1218 if (dpaa_intf->cgr_rx) { 1219 opts.we_mask |= QM_INITFQ_WE_CGID; 1220 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; 1221 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 1222 } 1223 1224 ret = qman_init_fq(rxq, flags, &opts); 1225 if (ret) { 1226 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", 1227 rxq->fqid, ret); 1228 } 1229 1230 rxq->cb.dqrr_dpdk_cb = NULL; 1231 dev->data->rx_queues[eth_rx_queue_id] = NULL; 1232 1233 return 0; 1234 } 1235 1236 static 1237 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 1238 uint16_t nb_desc __rte_unused, 1239 unsigned int socket_id __rte_unused, 1240 const struct rte_eth_txconf *tx_conf) 1241 { 1242 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1243 struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx]; 1244 1245 PMD_INIT_FUNC_TRACE(); 1246 1247 /* Tx deferred start is not supported */ 1248 if (tx_conf->tx_deferred_start) { 1249 DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev); 1250 return -EINVAL; 1251 } 1252 txq->nb_desc = UINT16_MAX; 1253 txq->offloads = tx_conf->offloads; 1254 1255 if (queue_idx >= dev->data->nb_tx_queues) { 1256 rte_errno = EOVERFLOW; 1257 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", 1258 (void *)dev, queue_idx, dev->data->nb_tx_queues); 1259 return -rte_errno; 1260 } 1261 1262 DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)", 1263 queue_idx, txq->fqid); 1264 dev->data->tx_queues[queue_idx] = txq; 1265 1266 return 0; 1267 } 1268 1269 static uint32_t 1270 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1271 { 1272 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1273 struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id]; 1274 u32 frm_cnt = 0; 1275 1276 PMD_INIT_FUNC_TRACE(); 1277 1278 if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) { 1279 DPAA_PMD_DEBUG("RX frame count for q(%d) is %u", 1280 rx_queue_id, frm_cnt); 1281 } 1282 return frm_cnt; 1283 } 1284 1285 static int dpaa_link_down(struct rte_eth_dev *dev) 1286 { 1287 struct fman_if *fif = dev->process_private; 1288 struct __fman_if *__fif; 1289 1290 PMD_INIT_FUNC_TRACE(); 1291 1292 __fif = container_of(fif, struct __fman_if, __if); 1293 1294 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) 1295 dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN); 1296 else 1297 return dpaa_eth_dev_stop(dev); 1298 return 0; 1299 } 1300 1301 static int dpaa_link_up(struct rte_eth_dev *dev) 1302 { 1303 struct fman_if *fif = dev->process_private; 1304 struct __fman_if *__fif; 1305 1306 PMD_INIT_FUNC_TRACE(); 1307 1308 __fif = container_of(fif, struct __fman_if, __if); 1309 1310 if (dev->data->dev_flags 
& RTE_ETH_DEV_INTR_LSC) 1311 dpaa_update_link_status(__fif->node_name, ETH_LINK_UP); 1312 else 1313 dpaa_eth_dev_start(dev); 1314 return 0; 1315 } 1316 1317 static int 1318 dpaa_flow_ctrl_set(struct rte_eth_dev *dev, 1319 struct rte_eth_fc_conf *fc_conf) 1320 { 1321 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1322 struct rte_eth_fc_conf *net_fc; 1323 1324 PMD_INIT_FUNC_TRACE(); 1325 1326 if (!(dpaa_intf->fc_conf)) { 1327 dpaa_intf->fc_conf = rte_zmalloc(NULL, 1328 sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 1329 if (!dpaa_intf->fc_conf) { 1330 DPAA_PMD_ERR("unable to save flow control info"); 1331 return -ENOMEM; 1332 } 1333 } 1334 net_fc = dpaa_intf->fc_conf; 1335 1336 if (fc_conf->high_water < fc_conf->low_water) { 1337 DPAA_PMD_ERR("Incorrect Flow Control Configuration"); 1338 return -EINVAL; 1339 } 1340 1341 if (fc_conf->mode == RTE_FC_NONE) { 1342 return 0; 1343 } else if (fc_conf->mode == RTE_FC_TX_PAUSE || 1344 fc_conf->mode == RTE_FC_FULL) { 1345 fman_if_set_fc_threshold(dev->process_private, 1346 fc_conf->high_water, 1347 fc_conf->low_water, 1348 dpaa_intf->bp_info->bpid); 1349 if (fc_conf->pause_time) 1350 fman_if_set_fc_quanta(dev->process_private, 1351 fc_conf->pause_time); 1352 } 1353 1354 /* Save the information in dpaa device */ 1355 net_fc->pause_time = fc_conf->pause_time; 1356 net_fc->high_water = fc_conf->high_water; 1357 net_fc->low_water = fc_conf->low_water; 1358 net_fc->send_xon = fc_conf->send_xon; 1359 net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 1360 net_fc->mode = fc_conf->mode; 1361 net_fc->autoneg = fc_conf->autoneg; 1362 1363 return 0; 1364 } 1365 1366 static int 1367 dpaa_flow_ctrl_get(struct rte_eth_dev *dev, 1368 struct rte_eth_fc_conf *fc_conf) 1369 { 1370 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1371 struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf; 1372 int ret; 1373 1374 PMD_INIT_FUNC_TRACE(); 1375 1376 if (net_fc) { 1377 fc_conf->pause_time = net_fc->pause_time; 1378 fc_conf->high_water = net_fc->high_water; 1379 fc_conf->low_water = net_fc->low_water; 1380 fc_conf->send_xon = net_fc->send_xon; 1381 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd; 1382 fc_conf->mode = net_fc->mode; 1383 fc_conf->autoneg = net_fc->autoneg; 1384 return 0; 1385 } 1386 ret = fman_if_get_fc_threshold(dev->process_private); 1387 if (ret) { 1388 fc_conf->mode = RTE_FC_TX_PAUSE; 1389 fc_conf->pause_time = 1390 fman_if_get_fc_quanta(dev->process_private); 1391 } else { 1392 fc_conf->mode = RTE_FC_NONE; 1393 } 1394 1395 return 0; 1396 } 1397 1398 static int 1399 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, 1400 struct rte_ether_addr *addr, 1401 uint32_t index, 1402 __rte_unused uint32_t pool) 1403 { 1404 int ret; 1405 1406 PMD_INIT_FUNC_TRACE(); 1407 1408 ret = fman_if_add_mac_addr(dev->process_private, 1409 addr->addr_bytes, index); 1410 1411 if (ret) 1412 DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret); 1413 return 0; 1414 } 1415 1416 static void 1417 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, 1418 uint32_t index) 1419 { 1420 PMD_INIT_FUNC_TRACE(); 1421 1422 fman_if_clear_mac_addr(dev->process_private, index); 1423 } 1424 1425 static int 1426 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, 1427 struct rte_ether_addr *addr) 1428 { 1429 int ret; 1430 1431 PMD_INIT_FUNC_TRACE(); 1432 1433 ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0); 1434 if (ret) 1435 DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret); 1436 1437 return ret; 1438 } 1439 1440 static int 1441 dpaa_dev_rss_hash_update(struct 
rte_eth_dev *dev, 1442 struct rte_eth_rss_conf *rss_conf) 1443 { 1444 struct rte_eth_dev_data *data = dev->data; 1445 struct rte_eth_conf *eth_conf = &data->dev_conf; 1446 1447 PMD_INIT_FUNC_TRACE(); 1448 1449 if (!(default_q || fmc_q)) { 1450 if (dpaa_fm_config(dev, rss_conf->rss_hf)) { 1451 DPAA_PMD_ERR("FM port configuration: Failed\n"); 1452 return -1; 1453 } 1454 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 1455 } else { 1456 DPAA_PMD_ERR("Function not supported\n"); 1457 return -ENOTSUP; 1458 } 1459 return 0; 1460 } 1461 1462 static int 1463 dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1464 struct rte_eth_rss_conf *rss_conf) 1465 { 1466 struct rte_eth_dev_data *data = dev->data; 1467 struct rte_eth_conf *eth_conf = &data->dev_conf; 1468 1469 /* dpaa does not support rss_key, so length should be 0*/ 1470 rss_conf->rss_key_len = 0; 1471 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 1472 return 0; 1473 } 1474 1475 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev, 1476 uint16_t queue_id) 1477 { 1478 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1479 struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; 1480 1481 if (!rxq->is_static) 1482 return -EINVAL; 1483 1484 return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI); 1485 } 1486 1487 static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev, 1488 uint16_t queue_id) 1489 { 1490 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1491 struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; 1492 uint32_t temp; 1493 ssize_t temp1; 1494 1495 if (!rxq->is_static) 1496 return -EINVAL; 1497 1498 qman_fq_portal_irqsource_remove(rxq->qp, ~0); 1499 1500 temp1 = read(rxq->q_fd, &temp, sizeof(temp)); 1501 if (temp1 != sizeof(temp)) 1502 DPAA_PMD_ERR("irq read error"); 1503 1504 qman_fq_portal_thread_irq(rxq->qp); 1505 1506 return 0; 1507 } 1508 1509 static void 1510 dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1511 struct rte_eth_rxq_info *qinfo) 1512 { 1513 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1514 struct qman_fq *rxq; 1515 int ret; 1516 1517 rxq = dev->data->rx_queues[queue_id]; 1518 1519 qinfo->mp = dpaa_intf->bp_info->mp; 1520 qinfo->scattered_rx = dev->data->scattered_rx; 1521 qinfo->nb_desc = rxq->nb_desc; 1522 1523 /* Report the HW Rx buffer length to user */ 1524 ret = fman_if_get_maxfrm(dev->process_private); 1525 if (ret > 0) 1526 qinfo->rx_buf_size = ret; 1527 1528 qinfo->conf.rx_free_thresh = 1; 1529 qinfo->conf.rx_drop_en = 1; 1530 qinfo->conf.rx_deferred_start = 0; 1531 qinfo->conf.offloads = rxq->offloads; 1532 } 1533 1534 static void 1535 dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1536 struct rte_eth_txq_info *qinfo) 1537 { 1538 struct qman_fq *txq; 1539 1540 txq = dev->data->tx_queues[queue_id]; 1541 1542 qinfo->nb_desc = txq->nb_desc; 1543 qinfo->conf.tx_thresh.pthresh = 0; 1544 qinfo->conf.tx_thresh.hthresh = 0; 1545 qinfo->conf.tx_thresh.wthresh = 0; 1546 1547 qinfo->conf.tx_free_thresh = 0; 1548 qinfo->conf.tx_rs_thresh = 0; 1549 qinfo->conf.offloads = txq->offloads; 1550 qinfo->conf.tx_deferred_start = 0; 1551 } 1552 1553 static struct eth_dev_ops dpaa_devops = { 1554 .dev_configure = dpaa_eth_dev_configure, 1555 .dev_start = dpaa_eth_dev_start, 1556 .dev_stop = dpaa_eth_dev_stop, 1557 .dev_close = dpaa_eth_dev_close, 1558 .dev_infos_get = dpaa_eth_dev_info, 1559 .dev_supported_ptypes_get = dpaa_supported_ptypes_get, 1560 1561 .rx_queue_setup = dpaa_eth_rx_queue_setup, 1562 .tx_queue_setup = dpaa_eth_tx_queue_setup, 
1563 .rx_burst_mode_get = dpaa_dev_rx_burst_mode_get, 1564 .tx_burst_mode_get = dpaa_dev_tx_burst_mode_get, 1565 .rxq_info_get = dpaa_rxq_info_get, 1566 .txq_info_get = dpaa_txq_info_get, 1567 1568 .flow_ctrl_get = dpaa_flow_ctrl_get, 1569 .flow_ctrl_set = dpaa_flow_ctrl_set, 1570 1571 .link_update = dpaa_eth_link_update, 1572 .stats_get = dpaa_eth_stats_get, 1573 .xstats_get = dpaa_dev_xstats_get, 1574 .xstats_get_by_id = dpaa_xstats_get_by_id, 1575 .xstats_get_names_by_id = dpaa_xstats_get_names_by_id, 1576 .xstats_get_names = dpaa_xstats_get_names, 1577 .xstats_reset = dpaa_eth_stats_reset, 1578 .stats_reset = dpaa_eth_stats_reset, 1579 .promiscuous_enable = dpaa_eth_promiscuous_enable, 1580 .promiscuous_disable = dpaa_eth_promiscuous_disable, 1581 .allmulticast_enable = dpaa_eth_multicast_enable, 1582 .allmulticast_disable = dpaa_eth_multicast_disable, 1583 .mtu_set = dpaa_mtu_set, 1584 .dev_set_link_down = dpaa_link_down, 1585 .dev_set_link_up = dpaa_link_up, 1586 .mac_addr_add = dpaa_dev_add_mac_addr, 1587 .mac_addr_remove = dpaa_dev_remove_mac_addr, 1588 .mac_addr_set = dpaa_dev_set_mac_addr, 1589 1590 .fw_version_get = dpaa_fw_version_get, 1591 1592 .rx_queue_intr_enable = dpaa_dev_queue_intr_enable, 1593 .rx_queue_intr_disable = dpaa_dev_queue_intr_disable, 1594 .rss_hash_update = dpaa_dev_rss_hash_update, 1595 .rss_hash_conf_get = dpaa_dev_rss_hash_conf_get, 1596 }; 1597 1598 static bool 1599 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv) 1600 { 1601 if (strcmp(dev->device->driver->name, 1602 drv->driver.name)) 1603 return false; 1604 1605 return true; 1606 } 1607 1608 static bool 1609 is_dpaa_supported(struct rte_eth_dev *dev) 1610 { 1611 return is_device_supported(dev, &rte_dpaa_pmd); 1612 } 1613 1614 int 1615 rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on) 1616 { 1617 struct rte_eth_dev *dev; 1618 1619 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); 1620 1621 dev = &rte_eth_devices[port]; 1622 1623 if (!is_dpaa_supported(dev)) 1624 return -ENOTSUP; 1625 1626 if (on) 1627 fman_if_loopback_enable(dev->process_private); 1628 else 1629 fman_if_loopback_disable(dev->process_private); 1630 1631 return 0; 1632 } 1633 1634 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf, 1635 struct fman_if *fman_intf) 1636 { 1637 struct rte_eth_fc_conf *fc_conf; 1638 int ret; 1639 1640 PMD_INIT_FUNC_TRACE(); 1641 1642 if (!(dpaa_intf->fc_conf)) { 1643 dpaa_intf->fc_conf = rte_zmalloc(NULL, 1644 sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 1645 if (!dpaa_intf->fc_conf) { 1646 DPAA_PMD_ERR("unable to save flow control info"); 1647 return -ENOMEM; 1648 } 1649 } 1650 fc_conf = dpaa_intf->fc_conf; 1651 ret = fman_if_get_fc_threshold(fman_intf); 1652 if (ret) { 1653 fc_conf->mode = RTE_FC_TX_PAUSE; 1654 fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf); 1655 } else { 1656 fc_conf->mode = RTE_FC_NONE; 1657 } 1658 1659 return 0; 1660 } 1661 1662 /* Initialise an Rx FQ */ 1663 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, 1664 uint32_t fqid) 1665 { 1666 struct qm_mcc_initfq opts = {0}; 1667 int ret; 1668 u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE; 1669 struct qm_mcc_initcgr cgr_opts = { 1670 .we_mask = QM_CGR_WE_CS_THRES | 1671 QM_CGR_WE_CSTD_EN | 1672 QM_CGR_WE_MODE, 1673 .cgr = { 1674 .cstd_en = QM_CGR_EN, 1675 .mode = QMAN_CGR_MODE_FRAME 1676 } 1677 }; 1678 1679 if (fmc_q || default_q) { 1680 ret = qman_reserve_fqid(fqid); 1681 if (ret) { 1682 DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d", 1683 fqid, ret); 1684 return -EINVAL; 
1685 } 1686 } 1687 1688 DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid); 1689 ret = qman_create_fq(fqid, flags, fq); 1690 if (ret) { 1691 DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d", 1692 fqid, ret); 1693 return ret; 1694 } 1695 fq->is_static = false; 1696 1697 dpaa_poll_queue_default_config(&opts); 1698 1699 if (cgr_rx) { 1700 /* Enable tail drop with cgr on this queue */ 1701 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0); 1702 cgr_rx->cb = NULL; 1703 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT, 1704 &cgr_opts); 1705 if (ret) { 1706 DPAA_PMD_WARN( 1707 "rx taildrop init fail on rx fqid 0x%x(ret=%d)", 1708 fq->fqid, ret); 1709 goto without_cgr; 1710 } 1711 opts.we_mask |= QM_INITFQ_WE_CGID; 1712 opts.fqd.cgid = cgr_rx->cgrid; 1713 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 1714 } 1715 without_cgr: 1716 ret = qman_init_fq(fq, 0, &opts); 1717 if (ret) 1718 DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret); 1719 return ret; 1720 } 1721 1722 /* Initialise a Tx FQ */ 1723 static int dpaa_tx_queue_init(struct qman_fq *fq, 1724 struct fman_if *fman_intf, 1725 struct qman_cgr *cgr_tx) 1726 { 1727 struct qm_mcc_initfq opts = {0}; 1728 struct qm_mcc_initcgr cgr_opts = { 1729 .we_mask = QM_CGR_WE_CS_THRES | 1730 QM_CGR_WE_CSTD_EN | 1731 QM_CGR_WE_MODE, 1732 .cgr = { 1733 .cstd_en = QM_CGR_EN, 1734 .mode = QMAN_CGR_MODE_FRAME 1735 } 1736 }; 1737 int ret; 1738 1739 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | 1740 QMAN_FQ_FLAG_TO_DCPORTAL, fq); 1741 if (ret) { 1742 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret); 1743 return ret; 1744 } 1745 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 1746 QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA; 1747 opts.fqd.dest.channel = fman_intf->tx_channel_id; 1748 opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY; 1749 opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; 1750 opts.fqd.context_b = 0; 1751 /* no tx-confirmation */ 1752 opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; 1753 opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; 1754 DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid); 1755 1756 if (cgr_tx) { 1757 /* Enable tail drop with cgr on this queue */ 1758 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, 1759 td_tx_threshold, 0); 1760 cgr_tx->cb = NULL; 1761 ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT, 1762 &cgr_opts); 1763 if (ret) { 1764 DPAA_PMD_WARN( 1765 "rx taildrop init fail on rx fqid 0x%x(ret=%d)", 1766 fq->fqid, ret); 1767 goto without_cgr; 1768 } 1769 opts.we_mask |= QM_INITFQ_WE_CGID; 1770 opts.fqd.cgid = cgr_tx->cgrid; 1771 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 1772 DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n", 1773 td_tx_threshold); 1774 } 1775 without_cgr: 1776 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); 1777 if (ret) 1778 DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret); 1779 return ret; 1780 } 1781 1782 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 1783 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). 
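 * These FQs are created only when RTE_LIBRTE_DPAA_DEBUG_DRIVER is defined:
 * dpaa_dev_init() attaches them to the interface's fqid_rx_err and
 * fqid_tx_err so that frames flagged as errored by the hardware can be
 * drained and inspected.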
*/ 1784 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) 1785 { 1786 struct qm_mcc_initfq opts = {0}; 1787 int ret; 1788 1789 PMD_INIT_FUNC_TRACE(); 1790 1791 ret = qman_reserve_fqid(fqid); 1792 if (ret) { 1793 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d", 1794 fqid, ret); 1795 return -EINVAL; 1796 } 1797 /* "map" this Rx FQ to one of the interfaces Tx FQID */ 1798 DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid); 1799 ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); 1800 if (ret) { 1801 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d", 1802 fqid, ret); 1803 return ret; 1804 } 1805 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; 1806 opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY; 1807 ret = qman_init_fq(fq, 0, &opts); 1808 if (ret) 1809 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d", 1810 fqid, ret); 1811 return ret; 1812 } 1813 #endif 1814 1815 /* Initialise a network interface */ 1816 static int 1817 dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev) 1818 { 1819 struct rte_dpaa_device *dpaa_device; 1820 struct fm_eth_port_cfg *cfg; 1821 struct dpaa_if *dpaa_intf; 1822 struct fman_if *fman_intf; 1823 int dev_id; 1824 1825 PMD_INIT_FUNC_TRACE(); 1826 1827 dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); 1828 dev_id = dpaa_device->id.dev_id; 1829 cfg = dpaa_get_eth_port_cfg(dev_id); 1830 fman_intf = cfg->fman_if; 1831 eth_dev->process_private = fman_intf; 1832 1833 /* Plugging of UCODE burst API not supported in Secondary */ 1834 dpaa_intf = eth_dev->data->dev_private; 1835 eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; 1836 if (dpaa_intf->cgr_tx) 1837 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow; 1838 else 1839 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx; 1840 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 1841 qman_set_fq_lookup_table( 1842 dpaa_intf->rx_queues->qman_fq_lookup_table); 1843 #endif 1844 1845 return 0; 1846 } 1847 1848 /* Initialise a network interface */ 1849 static int 1850 dpaa_dev_init(struct rte_eth_dev *eth_dev) 1851 { 1852 int num_rx_fqs, fqid; 1853 int loop, ret = 0; 1854 int dev_id; 1855 struct rte_dpaa_device *dpaa_device; 1856 struct dpaa_if *dpaa_intf; 1857 struct fm_eth_port_cfg *cfg; 1858 struct fman_if *fman_intf; 1859 struct fman_if_bpool *bp, *tmp_bp; 1860 uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES]; 1861 uint32_t cgrid_tx[MAX_DPAA_CORES]; 1862 uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES]; 1863 int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES]; 1864 int8_t vsp_id = -1; 1865 1866 PMD_INIT_FUNC_TRACE(); 1867 1868 dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); 1869 dev_id = dpaa_device->id.dev_id; 1870 dpaa_intf = eth_dev->data->dev_private; 1871 cfg = dpaa_get_eth_port_cfg(dev_id); 1872 fman_intf = cfg->fman_if; 1873 1874 dpaa_intf->name = dpaa_device->name; 1875 1876 /* save fman_if & cfg in the interface struture */ 1877 eth_dev->process_private = fman_intf; 1878 dpaa_intf->ifid = dev_id; 1879 dpaa_intf->cfg = cfg; 1880 1881 memset((char *)dev_rx_fqids, 0, 1882 sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES); 1883 1884 memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES); 1885 1886 /* Initialize Rx FQ's */ 1887 if (default_q) { 1888 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; 1889 } else if (fmc_q) { 1890 num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids, 1891 dev_vspids, 1892 DPAA_MAX_NUM_PCD_QUEUES); 1893 if (num_rx_fqs < 0) { 1894 DPAA_PMD_ERR("%s FMC initializes failed!", 1895 dpaa_intf->name); 1896 goto free_rx; 1897 } 1898 if (!num_rx_fqs) { 1899 DPAA_PMD_WARN("%s is not configured by FMC.", 1900 
dpaa_intf->name); 1901 } 1902 } else { 1903 /* FMCLESS mode, load balance to multiple cores.*/ 1904 num_rx_fqs = rte_lcore_count(); 1905 } 1906 1907 /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX 1908 * queues. 1909 */ 1910 if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { 1911 DPAA_PMD_ERR("Invalid number of RX queues\n"); 1912 return -EINVAL; 1913 } 1914 1915 if (num_rx_fqs > 0) { 1916 dpaa_intf->rx_queues = rte_zmalloc(NULL, 1917 sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE); 1918 if (!dpaa_intf->rx_queues) { 1919 DPAA_PMD_ERR("Failed to alloc mem for RX queues\n"); 1920 return -ENOMEM; 1921 } 1922 } else { 1923 dpaa_intf->rx_queues = NULL; 1924 } 1925 1926 memset(cgrid, 0, sizeof(cgrid)); 1927 memset(cgrid_tx, 0, sizeof(cgrid_tx)); 1928 1929 /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means 1930 * Tx tail drop is disabled. 1931 */ 1932 if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) { 1933 td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD")); 1934 DPAA_PMD_DEBUG("Tail drop threshold env configured: %u", 1935 td_tx_threshold); 1936 /* if a very large value is being configured */ 1937 if (td_tx_threshold > UINT16_MAX) 1938 td_tx_threshold = CGR_RX_PERFQ_THRESH; 1939 } 1940 1941 /* If congestion control is enabled globally*/ 1942 if (num_rx_fqs > 0 && td_threshold) { 1943 dpaa_intf->cgr_rx = rte_zmalloc(NULL, 1944 sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE); 1945 if (!dpaa_intf->cgr_rx) { 1946 DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n"); 1947 ret = -ENOMEM; 1948 goto free_rx; 1949 } 1950 1951 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0); 1952 if (ret != num_rx_fqs) { 1953 DPAA_PMD_WARN("insufficient CGRIDs available"); 1954 ret = -EINVAL; 1955 goto free_rx; 1956 } 1957 } else { 1958 dpaa_intf->cgr_rx = NULL; 1959 } 1960 1961 if (!fmc_q && !default_q) { 1962 ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs, 1963 num_rx_fqs, 0); 1964 if (ret < 0) { 1965 DPAA_PMD_ERR("Failed to alloc rx fqid's\n"); 1966 goto free_rx; 1967 } 1968 } 1969 1970 for (loop = 0; loop < num_rx_fqs; loop++) { 1971 if (default_q) 1972 fqid = cfg->rx_def; 1973 else 1974 fqid = dev_rx_fqids[loop]; 1975 1976 vsp_id = dev_vspids[loop]; 1977 1978 if (dpaa_intf->cgr_rx) 1979 dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; 1980 1981 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], 1982 dpaa_intf->cgr_rx ? 
	/* If congestion control is enabled globally */
	if (num_rx_fqs > 0 && td_threshold) {
		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
		if (!dpaa_intf->cgr_rx) {
			DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
			ret = -ENOMEM;
			goto free_rx;
		}

		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
		if (ret != num_rx_fqs) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			ret = -EINVAL;
			goto free_rx;
		}
	} else {
		dpaa_intf->cgr_rx = NULL;
	}

	if (!fmc_q && !default_q) {
		ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
					    num_rx_fqs, 0);
		if (ret < 0) {
			DPAA_PMD_ERR("Failed to alloc rx fqids\n");
			goto free_rx;
		}
	}

	for (loop = 0; loop < num_rx_fqs; loop++) {
		if (default_q)
			fqid = cfg->rx_def;
		else
			fqid = dev_rx_fqids[loop];

		vsp_id = dev_vspids[loop];

		if (dpaa_intf->cgr_rx)
			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
			fqid);
		if (ret)
			goto free_rx;
		dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs. Have as many Tx FQs as number of cores */
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		MAX_DPAA_CORES, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
		ret = -ENOMEM;
		goto free_rx;
	}

	/* If congestion control is enabled globally */
	if (td_tx_threshold) {
		dpaa_intf->cgr_tx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * MAX_DPAA_CORES,
			MAX_CACHELINE);
		if (!dpaa_intf->cgr_tx) {
			DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
			ret = -ENOMEM;
			goto free_rx;
		}

		ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
					     1, 0);
		if (ret != MAX_DPAA_CORES) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			ret = -EINVAL;
			goto free_rx;
		}
	} else {
		dpaa_intf->cgr_tx = NULL;
	}

	for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
		if (dpaa_intf->cgr_tx)
			dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];

		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
			fman_intf,
			dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
		if (ret)
			goto free_tx;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
		[DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	if (ret) {
		DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
		goto free_tx;
	}
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
		[DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	if (ret) {
		DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
		goto free_tx;
	}
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");

	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf, fman_intf);

	/* reset bpool list, initialize bpool dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		rte_free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
			"store MAC addresses",
			RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		ret = -ENOMEM;
		goto free_tx;
	}

	/* copy the primary mac address */
	rte_ether_addr_copy(&fman_intf->mac_addr,
			    &eth_dev->data->mac_addrs[0]);

	RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
		dpaa_device->name, RTE_ETHER_ADDR_BYTES(&fman_intf->mac_addr));
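
	/*
	 * Note (descriptive, matching the code below): when the MAC is
	 * shared with the kernel (is_shared_mac), the PMD leaves the
	 * MAC-level settings untouched. Otherwise it quiesces the port
	 * here: error packet handling is configured, Rx and promiscuous
	 * mode are disabled, the multicast filter table and statistics are
	 * reset, scatter-gather is turned off and the max frame size is
	 * set, until dev_start/dev_configure reprogram the interface.
	 */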
	if (!fman_intf->is_shared_mac) {
		/* Configure error packet handling */
		fman_if_receive_rx_errors(fman_intf,
			FM_FD_RX_STATUS_ERR_MASK);
		/* Disable RX mode */
		fman_if_disable_rx(fman_intf);
		/* Disable promiscuous mode */
		fman_if_promiscuous_disable(fman_intf);
		/* Disable multicast */
		fman_if_reset_mcast_filter_table(fman_intf);
		/* Reset interface statistics */
		fman_if_stats_reset(fman_intf);
		/* Disable SG by default */
		fman_if_set_sg(fman_intf, 0);
		fman_if_set_maxfrm(fman_intf,
			RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
	}

	return 0;

free_tx:
	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;
	dpaa_intf->nb_tx_queues = 0;

free_rx:
	rte_free(dpaa_intf->cgr_rx);
	rte_free(dpaa_intf->cgr_tx);
	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;
	dpaa_intf->nb_rx_queues = 0;
	return ret;
}

static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
		RTE_PKTMBUF_HEADROOM) {
		DPAA_PMD_ERR(
		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
		RTE_PKTMBUF_HEADROOM,
		DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);

		return -1;
	}

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		eth_dev->device = &dpaa_dev->device;
		eth_dev->dev_ops = &dpaa_devops;

		ret = dpaa_dev_init_secondary(eth_dev);
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "secondary dev init failed\n");
			return ret;
		}

		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
		if (access("/tmp/fmc.bin", F_OK) == -1) {
			DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
			default_q = 1;
		}

		if (!(default_q || fmc_q)) {
			if (dpaa_fm_init()) {
				DPAA_PMD_ERR("FM init failed\n");
				return -1;
			}
		}

		/* disabling the default push mode for LS1043 */
		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
			dpaa_push_mode_max_queue = 0;

		/* Check if push mode queues are to be enabled. Currently we
		 * allow only one queue per thread.
		 */
		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
			dpaa_push_mode_max_queue =
				atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
				dpaa_push_mode_max_queue =
					DPAA_MAX_PUSH_MODE_QUEUE;
		}
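
		/*
		 * Illustrative example (not part of the driver): starting
		 * the application with DPAA_PUSH_QUEUES_NUMBER=8 in the
		 * environment raises the number of push-mode Rx queues
		 * from the default of 4 up to the DPAA_MAX_PUSH_MODE_QUEUE
		 * cap; larger values are clamped to that cap, while the
		 * LS1043A check above disables push mode by default.
		 */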
		is_global_init = 1;
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)1);
		if (ret) {
			DPAA_PMD_ERR("Unable to initialize portal");
			return ret;
		}
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (!eth_dev)
		return -ENOMEM;

	eth_dev->data->dev_private =
		rte_zmalloc("ethdev private structure",
			    sizeof(struct dpaa_if),
			    RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	dpaa_dev->eth_dev = eth_dev;

	qman_ern_register_cb(dpaa_free_mbuf);

	if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_eth_dev_close(eth_dev);
	ret = rte_eth_dev_release_port(eth_dev);

	return ret;
}

static void __attribute__((destructor(102))) dpaa_finish(void)
{
	/* For secondary, primary will do all the cleanup */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	if (!(default_q || fmc_q)) {
		unsigned int i;

		for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
			if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
				struct rte_eth_dev *dev = &rte_eth_devices[i];
				struct dpaa_if *dpaa_intf =
					dev->data->dev_private;
				struct fman_if *fif =
					dev->process_private;
				if (dpaa_intf->port_handle)
					if (dpaa_fm_deconfig(dpaa_intf, fif))
						DPAA_PMD_WARN("DPAA FM "
							"deconfig failed\n");
				if (fif->num_profiles) {
					if (dpaa_port_vsp_cleanup(dpaa_intf,
								  fif))
						DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
				}
			}
		}
		if (is_global_init)
			if (dpaa_fm_term())
				DPAA_PMD_WARN("DPAA FM term failed\n");

		is_global_init = 0;

		DPAA_PMD_INFO("DPAA fman cleaned up");
	}
}

static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_flags = RTE_DPAA_DRV_INTR_LSC,
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);
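
/*
 * Usage note (illustrative, not emitted by the driver): the PMD log level
 * defaults to NOTICE via RTE_LOG_REGISTER_DEFAULT above; assuming the usual
 * "pmd.net.dpaa" logtype name, it can be raised at runtime with an EAL
 * option such as:
 *
 *	dpdk-testpmd --log-level=pmd.net.dpaa:debug -- -i
 *
 * The environment variables handled in rte_dpaa_probe() and dpaa_dev_init()
 * (DPAA_PUSH_QUEUES_NUMBER, DPAA_TX_TAILDROP_THRESHOLD) must likewise be
 * exported before the application starts.
 */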