/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
#include <rte_pmd_dpaa.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_RSS_HASH;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_MULTI_SEGS;

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int default_q;	/* use default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues as default - as each of
 * these queues needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4

static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Queue index which is in push mode */


/* Per FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};

static struct rte_dpaa_driver rte_dpaa_pmd;

static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);

static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(struct qm_mcc_initfq));
	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			    QM_FQCTRL_PREFERINCACHE;
	opts->fqd.context_a.stashing.exclusive = 0;
	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
		opts->fqd.context_a.stashing.annotation_cl =
					DPAA_IF_RX_ANNOTATION_STASH;
	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}

static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;
	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size > buffsz) {
		DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
	    (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
		DPAA_PMD_ERR("Too big to fit for Max SG list %d",
			     buffsz * DPAA_SGT_MAX_ENTRIES);
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	fman_if_set_maxfrm(dpaa_intf->fif, frame_size);

	return 0;
}

static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		DPAA_PMD_DEBUG("enabling jumbo");

		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN)
			max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
		else {
			DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
				"supported is %d",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				DPAA_MAX_RX_PKT_LEN);
			max_len = DPAA_MAX_RX_PKT_LEN;
		}

		fman_if_set_maxfrm(dpaa_intf->fif, max_len);
		dev->data->mtu = max_len
			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
		DPAA_PMD_DEBUG("enabling scatter mode");
		fman_if_set_sg(dpaa_intf->fif, 1);
		dev->data->scattered_rx = 1;
	}

	return 0;
}

static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP
	};

	PMD_INIT_FUNC_TRACE();

	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}

static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	dev->tx_pkt_burst = dpaa_eth_queue_tx;
	fman_if_enable_rx(dpaa_intf->fif);

	return 0;
}

static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(dpaa_intf->fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}
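
/*
 * Illustrative usage sketch (application side, not compiled into the PMD;
 * "port_id" and "mbuf_pool" are assumptions): an application that wants
 * large frames on this port would request the JUMBO_FRAME and SCATTER
 * offloads before dpaa_eth_dev_configure() runs, roughly as below. The
 * checksum/RSS offloads listed in dev_rx_offloads_nodis and
 * dev_tx_offloads_nodis need not be requested; they stay enabled anyway.
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.max_rx_pkt_len = 9000,
 *			.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
 *				    DEV_RX_OFFLOAD_SCATTER,
 *		},
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 *	if (rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
 *				   NULL, mbuf_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot setup rx queue\n");
 */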

static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}

static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		    char *fw_version,
		    size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	ret += 1; /* add the size of '\0' */

	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	DPAA_PMD_DEBUG(": %s", dpaa_intf->name);

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;

	if (dpaa_intf->fif->mac_type == fman_mac_1g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G;
	} else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
		dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
	} else {
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);
		return -EINVAL;
	}

	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
	dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;

	return 0;
}

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;

	PMD_INIT_FUNC_TRACE();

	if (dpaa_intf->fif->mac_type == fman_mac_1g)
		link->link_speed = ETH_SPEED_NUM_1G;
	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
		link->link_speed = ETH_SPEED_NUM_10G;
	else
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);

	link->link_status = dpaa_intf->valid;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = ETH_LINK_AUTONEG;
	return 0;
}

static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_get(dpaa_intf->fif, stats);
	return 0;
}
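
/*
 * Illustrative usage sketch (application side, not compiled into the PMD;
 * "port_id" is an assumption): the info, firmware-version and basic
 * statistics callbacks above are reached through the generic ethdev API.
 * The firmware string has the "SVR:<svr>-fman-v<rev>" form built by
 * dpaa_fw_version_get().
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_stats stats;
 *	char fw[64];
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	printf("max_rx_pktlen %u, max %u rx queues\n",
 *	       info.max_rx_pktlen, info.max_rx_queues);
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("fw: %s\n", fw);
 *	rte_eth_stats_get(port_id, &stats);
 *	printf("ipackets %" PRIu64 " opackets %" PRIu64 "\n",
 *	       stats.ipackets, stats.opackets);
 */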

static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_reset(dpaa_intf->fif);

	return 0;
}

static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	fman_if_stats_get_all(dpaa_intf->fif, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}

static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			strlcpy(xstats_names[i].name,
				dpaa_xstats_strings[i].name,
				sizeof(xstats_names[i].name));

	return stat_cnt;
}

static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		struct dpaa_if *dpaa_intf = dev->data->dev_private;

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dpaa_intf->fif);

	return 0;
}

static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dpaa_intf->fif);

	return 0;
}

static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dpaa_intf->fif);

	return 0;
}
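
/*
 * Illustrative usage sketch (application side, not compiled into the PMD;
 * "port_id" is an assumption): how the xstats callbacks above are usually
 * consumed. Names are resolved once, values polled as needed.
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *	struct rte_eth_xstat *stats = calloc(nb, sizeof(*stats));
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, stats, nb);
 *	for (int i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n",
 *		       names[stats[i].id].name, stats[i].value);
 */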

static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dpaa_intf->fif);

	return 0;
}

static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
	struct qm_mcc_initfq opts = {0};
	u32 flags = 0;
	int ret;
	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
			     (void *)dev, queue_idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}

	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
		      queue_idx, rxq->fqid);

	/* Max packet can fit in single buffer */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
		;
	} else if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_SCATTER) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
			buffsz * DPAA_SGT_MAX_ENTRIES) {
			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
				"MaxSGlist %d",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				buffsz * DPAA_SGT_MAX_ENTRIES);
			rte_errno = EOVERFLOW;
			return -rte_errno;
		}
	} else {
		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
			      " larger than a single mbuf (%u) and scattered"
			      " mode has not been requested",
			      dev->data->dev_conf.rxmode.max_rx_pkt_len,
			      buffsz);
	}

	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to Dataroom Size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
			       dpaa_intf->name, fd_offset,
			       fman_if_get_fdoff(dpaa_intf->fif));
	}
	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
		       fman_if_get_sg_enable(dpaa_intf->fif),
		       dev->data->dev_conf.rxmode.max_rx_pkt_len);
	/* check if push mode is requested; no error check for now */
	if (!rxq->is_static &&
	    dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
		struct qman_portal *qp;
		int q_fd;

		dpaa_push_queue_idx++;
		opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
		opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
				   QM_FQCTRL_CTXASTASHING |
				   QM_FQCTRL_PREFERINCACHE;
		opts.fqd.context_a.stashing.exclusive = 0;
		/* In multicore scenario stashing becomes a bottleneck on LS1046.
		 * So do not enable stashing in this case
		 */
		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
			opts.fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
		opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
		opts.fqd.context_a.stashing.context_cl =
						DPAA_IF_RX_CONTEXT_STASH;

		/* Create a channel and associate given queue with the channel */
		qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
		opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
		opts.fqd.dest.channel = rxq->ch_id;
		opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
		flags = QMAN_INITFQ_FLAG_SCHED;

		/* Configure tail drop */
		if (dpaa_intf->cgr_rx) {
			opts.we_mask |= QM_INITFQ_WE_CGID;
			opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
		}
		ret = qman_init_fq(rxq, flags, &opts);
		if (ret) {
			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
				     "ret:%d(%s)", rxq->fqid, ret,
				     strerror(ret));
			return ret;
		}
		if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
		} else {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
			rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
		}

		rxq->is_static = true;

		/* Allocate qman specific portals */
		qp = fsl_qman_fq_portal_create(&q_fd);
		if (!qp) {
			DPAA_PMD_ERR("Unable to alloc fq portal");
			return -1;
		}
		rxq->qp = qp;

		/* Set up the device interrupt handler */
		if (!dev->intr_handle) {
			struct rte_dpaa_device *dpaa_dev;
			struct rte_device *rdev = dev->device;

			dpaa_dev = container_of(rdev, struct rte_dpaa_device,
						device);
			dev->intr_handle = &dpaa_dev->intr_handle;
			dev->intr_handle->intr_vec = rte_zmalloc(NULL,
					dpaa_push_mode_max_queue, 0);
			if (!dev->intr_handle->intr_vec) {
				DPAA_PMD_ERR("intr_vec alloc failed");
				return -ENOMEM;
			}
			dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
			dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
		}

		dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
		dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
		dev->intr_handle->efds[queue_idx] = q_fd;
		rxq->q_fd = q_fd;
	}
	rxq->bp_array = rte_dpaa_bpid_info;
	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};

		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
		}
	}

	return 0;
}
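
/*
 * Illustrative usage sketch (application side, not compiled into the PMD;
 * "port_id", "queue_id", "pkts" and "run" are assumptions): when the queue
 * is static (push mode), the q_fd/intr_handle wiring done in
 * dpaa_eth_rx_queue_setup() lets an application sleep until traffic
 * arrives instead of busy polling, in the usual l3fwd-power style.
 *
 *	struct rte_epoll_event ev;
 *	uint16_t nb;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	while (run) {
 *		rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *		if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0) {
 *			rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *			nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *		}
 *	}
 */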

int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
	struct qm_mcc_initfq opts = {0};

	if (dpaa_push_mode_max_queue)
		DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
			      "PUSH mode already enabled for first %d queues.\n"
			      "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
			      dpaa_push_mode_max_queue);

	dpaa_poll_queue_default_config(&opts);

	switch (queue_conf->ev.sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
		break;
	}

	opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
	opts.fqd.dest.channel = ch_id;
	opts.fqd.dest.wq = queue_conf->ev.priority;

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
			     "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
		return ret;
	}

	/* copy configuration which needs to be filled during dequeue */
	memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
	dev->data->rx_queues[eth_rx_queue_id] = rxq;

	return ret;
}

int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct qm_mcc_initfq opts;
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

	dpaa_poll_queue_default_config(&opts);

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
			     rxq->fqid, ret);
	}

	rxq->cb.dqrr_dpdk_cb = NULL;
	dev->data->rx_queues[eth_rx_queue_id] = NULL;

	return 0;
}

static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_tx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
			     (void *)dev, queue_idx, dev->data->nb_tx_queues);
		return -rte_errno;
	}

	DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
		      queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
	return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
	u32 frm_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
		DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
			       rx_queue_id, frm_cnt);
	}
	return frm_cnt;
}

static int dpaa_link_down(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
	return 0;
}
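
/*
 * Illustrative usage sketch (application side, not compiled into the PMD;
 * "rx_adapter_id", "evdev_id", "eth_port_id" and "ev_port_conf" are
 * assumptions): dpaa_eth_eventq_attach()/detach() above are normally
 * reached through the event ethernet Rx adapter rather than called
 * directly.
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev = {
 *			.queue_id = 0,
 *			.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		},
 *	};
 *
 *	rte_event_eth_rx_adapter_create(rx_adapter_id, evdev_id,
 *					&ev_port_conf);
 *	rte_event_eth_rx_adapter_queue_add(rx_adapter_id, eth_port_id,
 *					   0, &qconf);
 *	rte_event_eth_rx_adapter_start(rx_adapter_id);
 */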

static int dpaa_link_up(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_start(dev);
	return 0;
}

static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	net_fc = dpaa_intf->fc_conf;

	if (fc_conf->high_water < fc_conf->low_water) {
		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
		return -EINVAL;
	}

	if (fc_conf->mode == RTE_FC_NONE) {
		return 0;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
		   fc_conf->mode == RTE_FC_FULL) {
		fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
					 fc_conf->low_water,
					 dpaa_intf->bp_info->bpid);
		if (fc_conf->pause_time)
			fman_if_set_fc_quanta(dpaa_intf->fif,
					      fc_conf->pause_time);
	}

	/* Save the information in dpaa device */
	net_fc->pause_time = fc_conf->pause_time;
	net_fc->high_water = fc_conf->high_water;
	net_fc->low_water = fc_conf->low_water;
	net_fc->send_xon = fc_conf->send_xon;
	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
	net_fc->mode = fc_conf->mode;
	net_fc->autoneg = fc_conf->autoneg;

	return 0;
}

static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (net_fc) {
		fc_conf->pause_time = net_fc->pause_time;
		fc_conf->high_water = net_fc->high_water;
		fc_conf->low_water = net_fc->low_water;
		fc_conf->send_xon = net_fc->send_xon;
		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
		fc_conf->mode = net_fc->mode;
		fc_conf->autoneg = net_fc->autoneg;
		return 0;
	}
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}

static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
		      struct rte_ether_addr *addr,
		      uint32_t index,
		      __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);

	if (ret)
		DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
			 uint32_t index)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_clear_mac_addr(dpaa_intf->fif, index);
}

static int
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
		      struct rte_ether_addr *addr)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
	if (ret)
		DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);

	return ret;
}
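
/*
 * Illustrative usage sketch (application side, not compiled into the PMD;
 * "port_id" is an assumption and the threshold values are arbitrary
 * placeholders): requesting Tx pause frames lands in dpaa_flow_ctrl_set()
 * above, which programs the buffer-pool depletion thresholds in FMAN.
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_FC_TX_PAUSE,
 *		.high_water = 192,
 *		.low_water = 64,
 *		.pause_time = 0xFFFF,
 *	};
 *
 *	if (rte_eth_dev_flow_ctrl_set(port_id, &fc) < 0)
 *		printf("flow control setup failed\n");
 */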

static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
				      uint16_t queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];

	if (!rxq->is_static)
		return -EINVAL;

	return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
}

static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
				       uint16_t queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
	uint32_t temp;
	ssize_t temp1;

	if (!rxq->is_static)
		return -EINVAL;

	qman_fq_portal_irqsource_remove(rxq->qp, ~0);

	temp1 = read(rxq->q_fd, &temp, sizeof(temp));
	if (temp1 != sizeof(temp))
		DPAA_PMD_ERR("irq read error");

	qman_fq_portal_thread_irq(rxq->qp);

	return 0;
}

static struct eth_dev_ops dpaa_devops = {
	.dev_configure = dpaa_eth_dev_configure,
	.dev_start = dpaa_eth_dev_start,
	.dev_stop = dpaa_eth_dev_stop,
	.dev_close = dpaa_eth_dev_close,
	.dev_infos_get = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	.rx_queue_setup = dpaa_eth_rx_queue_setup,
	.tx_queue_setup = dpaa_eth_tx_queue_setup,
	.rx_queue_release = dpaa_eth_rx_queue_release,
	.tx_queue_release = dpaa_eth_tx_queue_release,
	.rx_queue_count = dpaa_dev_rx_queue_count,

	.flow_ctrl_get = dpaa_flow_ctrl_get,
	.flow_ctrl_set = dpaa_flow_ctrl_set,

	.link_update = dpaa_eth_link_update,
	.stats_get = dpaa_eth_stats_get,
	.xstats_get = dpaa_dev_xstats_get,
	.xstats_get_by_id = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
	.xstats_get_names = dpaa_xstats_get_names,
	.xstats_reset = dpaa_eth_stats_reset,
	.stats_reset = dpaa_eth_stats_reset,
	.promiscuous_enable = dpaa_eth_promiscuous_enable,
	.promiscuous_disable = dpaa_eth_promiscuous_disable,
	.allmulticast_enable = dpaa_eth_multicast_enable,
	.allmulticast_disable = dpaa_eth_multicast_disable,
	.mtu_set = dpaa_mtu_set,
	.dev_set_link_down = dpaa_link_down,
	.dev_set_link_up = dpaa_link_up,
	.mac_addr_add = dpaa_dev_add_mac_addr,
	.mac_addr_remove = dpaa_dev_remove_mac_addr,
	.mac_addr_set = dpaa_dev_set_mac_addr,

	.fw_version_get = dpaa_fw_version_get,

	.rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
	.rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
{
	if (strcmp(dev->device->driver->name,
		   drv->driver.name))
		return false;

	return true;
}

static bool
is_dpaa_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_dpaa_pmd);
}

int
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct dpaa_if *dpaa_intf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_dpaa_supported(dev))
		return -ENOTSUP;

	dpaa_intf = dev->data->dev_private;

	if (on)
		fman_if_loopback_enable(dpaa_intf->fif);
	else
		fman_if_loopback_disable(dpaa_intf->fif);

	return 0;
}
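
/*
 * Illustrative usage sketch (application side, not compiled into the PMD;
 * "port_id" is an assumption): rte_pmd_dpaa_set_tx_loopback() is the
 * PMD-specific API exported via rte_pmd_dpaa.h, useful e.g. for a
 * self-test that expects its transmitted frames back on the same port.
 *
 *	#include <rte_pmd_dpaa.h>
 *
 *	if (rte_pmd_dpaa_set_tx_loopback(port_id, 1) != 0)
 *		printf("loopback not supported on port %u\n", port_id);
 *	... run the traffic check ...
 *	rte_pmd_dpaa_set_tx_loopback(port_id, 0);
 */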

static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
	struct rte_eth_fc_conf *fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	fc_conf = dpaa_intf->fc_conf;
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}

/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
			      uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	struct qm_mcc_initcgr cgr_opts = {
		.we_mask = QM_CGR_WE_CS_THRES |
				QM_CGR_WE_CSTD_EN |
				QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_FRAME
		}
	};

	if (fqid) {
		ret = qman_reserve_fqid(fqid);
		if (ret) {
			DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
				     fqid, ret);
			return -EINVAL;
		}
	} else {
		flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
	}
	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
	ret = qman_create_fq(fqid, flags, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
			     fqid, ret);
		return ret;
	}
	fq->is_static = false;

	dpaa_poll_queue_default_config(&opts);

	if (cgr_rx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
		cgr_rx->cb = NULL;
		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
				fq->fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_rx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}
without_cgr:
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
	return ret;
}

/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
			      struct fman_if *fman_intf)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
	if (ret) {
		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.dest.channel = fman_intf->tx_channel_id;
	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_b = 0;
	/* no tx-confirmation */
	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
	DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret)
		DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
	return ret;
}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}
	/* "map" this Rx FQ to one of the interfaces Tx FQID */
	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
			     fqid, ret);
	return ret;
}
#endif

/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;
	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
	char eth_buf[RTE_ETHER_ADDR_FMT_SIZE];

	PMD_INIT_FUNC_TRACE();

	dpaa_intf = eth_dev->data->dev_private;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev->dev_ops = &dpaa_devops;
		/* Plugging of UCODE burst API not supported in Secondary */
		eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		qman_set_fq_lookup_table(
				dpaa_intf->rx_queues->qman_fq_lookup_table);
#endif
		return 0;
	}

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = dpaa_get_eth_port_cfg(dev_id);
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* save fman_if & cfg in the interface structure */
	dpaa_intf->fif = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;

	/* Initialize Rx FQ's */
	if (default_q) {
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
	} else {
		if (getenv("DPAA_NUM_RX_QUEUES"))
			num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
		else
			num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
	}


	/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
		DPAA_PMD_ERR("Invalid number of RX queues\n");
		return -EINVAL;
	}

	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
		return -ENOMEM;
	}

	/* If congestion control is enabled globally */
	if (td_threshold) {
		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
		if (!dpaa_intf->cgr_rx) {
			DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
			ret = -ENOMEM;
			goto free_rx;
		}

		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
		if (ret != num_rx_fqs) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			ret = -EINVAL;
			goto free_rx;
		}
	} else {
		dpaa_intf->cgr_rx = NULL;
	}

	for (loop = 0; loop < num_rx_fqs; loop++) {
		if (default_q)
			fqid = cfg->rx_def;
		else
			fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx *
				DPAA_PCD_FQID_MULTIPLIER + loop;

		if (dpaa_intf->cgr_rx)
			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
			fqid);
		if (ret)
			goto free_rx;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs. Have as many Tx FQs as number of cores */
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		MAX_DPAA_CORES, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
		ret = -ENOMEM;
		goto free_rx;
	}

	for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
					 fman_intf);
		if (ret)
			goto free_tx;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");

	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf);

	/* reset bpool list, initialize bpool dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		rte_free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
						"store MAC addresses",
				RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		ret = -ENOMEM;
		goto free_tx;
	}

	/* copy the primary mac address */
	rte_ether_addr_copy(&fman_intf->mac_addr,
			    &eth_dev->data->mac_addrs[0]);
	rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr);

	DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf);

	/* Disable RX mode */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);
	/* Disable SG by default */
	fman_if_set_sg(fman_intf, 0);
	fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);

	return 0;

free_tx:
	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;
	dpaa_intf->nb_tx_queues = 0;

free_rx:
	rte_free(dpaa_intf->cgr_rx);
	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;
	dpaa_intf->nb_rx_queues = 0;
	return ret;
}

static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	int loop;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa_eth_dev_close(dev);

	/* release configuration memory */
	if (dpaa_intf->fc_conf)
		rte_free(dpaa_intf->fc_conf);

	/* Release RX congestion Groups */
	if (dpaa_intf->cgr_rx) {
		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);

		qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
					 dpaa_intf->nb_rx_queues);
	}

	rte_free(dpaa_intf->cgr_rx);
	dpaa_intf->cgr_rx = NULL;

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	return 0;
}

static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
		RTE_PKTMBUF_HEADROOM) {
		DPAA_PMD_ERR(
		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
		RTE_PKTMBUF_HEADROOM,
		DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);

		return -1;
	}

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		eth_dev->device = &dpaa_dev->device;
		eth_dev->dev_ops = &dpaa_devops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
		if (access("/tmp/fmc.bin", F_OK) == -1) {
			DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
			default_q = 1;
		}

		/* disabling the default push mode for LS1043 */
		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
			dpaa_push_mode_max_queue = 0;

		/* if push mode queues are to be enabled. Currently we allow
		 * only one queue per thread.
		 */
		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
			dpaa_push_mode_max_queue =
					atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
				dpaa_push_mode_max_queue =
						DPAA_MAX_PUSH_MODE_QUEUE;
		}

		is_global_init = 1;
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)1);
		if (ret) {
			DPAA_PMD_ERR("Unable to initialize portal");
			return ret;
		}
	}

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
	} else {
		eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
		if (eth_dev == NULL)
			return -ENOMEM;

		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa_if),
						RTE_CACHE_LINE_SIZE);
		if (!eth_dev->data->dev_private) {
			DPAA_PMD_ERR("Cannot allocate memzone for port data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	}
	eth_dev->device = &dpaa_dev->device;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_dev_uninit(eth_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);