1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2017 NXP 5 * 6 */ 7 /* System headers */ 8 #include <stdio.h> 9 #include <inttypes.h> 10 #include <unistd.h> 11 #include <limits.h> 12 #include <sched.h> 13 #include <signal.h> 14 #include <pthread.h> 15 #include <sys/types.h> 16 #include <sys/syscall.h> 17 18 #include <rte_byteorder.h> 19 #include <rte_common.h> 20 #include <rte_interrupts.h> 21 #include <rte_log.h> 22 #include <rte_debug.h> 23 #include <rte_pci.h> 24 #include <rte_atomic.h> 25 #include <rte_branch_prediction.h> 26 #include <rte_memory.h> 27 #include <rte_tailq.h> 28 #include <rte_eal.h> 29 #include <rte_alarm.h> 30 #include <rte_ether.h> 31 #include <rte_ethdev_driver.h> 32 #include <rte_malloc.h> 33 #include <rte_ring.h> 34 35 #include <rte_dpaa_bus.h> 36 #include <rte_dpaa_logs.h> 37 #include <dpaa_mempool.h> 38 39 #include <dpaa_ethdev.h> 40 #include <dpaa_rxtx.h> 41 #include <rte_pmd_dpaa.h> 42 43 #include <fsl_usd.h> 44 #include <fsl_qman.h> 45 #include <fsl_bman.h> 46 #include <fsl_fman.h> 47 48 /* Keep track of whether QMAN and BMAN have been globally initialized */ 49 static int is_global_init; 50 /* At present we only allow up to 4 push mode queues - as each of this queue 51 * need dedicated portal and we are short of portals. 52 */ 53 #define DPAA_MAX_PUSH_MODE_QUEUE 4 54 55 static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; 56 static int dpaa_push_queue_idx; /* Queue index which are in push mode*/ 57 58 59 /* Per FQ Taildrop in frame count */ 60 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH; 61 62 struct rte_dpaa_xstats_name_off { 63 char name[RTE_ETH_XSTATS_NAME_SIZE]; 64 uint32_t offset; 65 }; 66 67 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { 68 {"rx_align_err", 69 offsetof(struct dpaa_if_stats, raln)}, 70 {"rx_valid_pause", 71 offsetof(struct dpaa_if_stats, rxpf)}, 72 {"rx_fcs_err", 73 offsetof(struct dpaa_if_stats, rfcs)}, 74 {"rx_vlan_frame", 75 offsetof(struct dpaa_if_stats, rvlan)}, 76 {"rx_frame_err", 77 offsetof(struct dpaa_if_stats, rerr)}, 78 {"rx_drop_err", 79 offsetof(struct dpaa_if_stats, rdrp)}, 80 {"rx_undersized", 81 offsetof(struct dpaa_if_stats, rund)}, 82 {"rx_oversize_err", 83 offsetof(struct dpaa_if_stats, rovr)}, 84 {"rx_fragment_pkt", 85 offsetof(struct dpaa_if_stats, rfrg)}, 86 {"tx_valid_pause", 87 offsetof(struct dpaa_if_stats, txpf)}, 88 {"tx_fcs_err", 89 offsetof(struct dpaa_if_stats, terr)}, 90 {"tx_vlan_frame", 91 offsetof(struct dpaa_if_stats, tvlan)}, 92 {"rx_undersized", 93 offsetof(struct dpaa_if_stats, tund)}, 94 }; 95 96 static struct rte_dpaa_driver rte_dpaa_pmd; 97 98 static void 99 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); 100 101 static inline void 102 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts) 103 { 104 memset(opts, 0, sizeof(struct qm_mcc_initfq)); 105 opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; 106 opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING | 107 QM_FQCTRL_PREFERINCACHE; 108 opts->fqd.context_a.stashing.exclusive = 0; 109 if (dpaa_svr_family != SVR_LS1046A_FAMILY) 110 opts->fqd.context_a.stashing.annotation_cl = 111 DPAA_IF_RX_ANNOTATION_STASH; 112 opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; 113 opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH; 114 } 115 116 static int 117 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 118 { 119 struct dpaa_if *dpaa_intf = dev->data->dev_private; 120 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN 121 + VLAN_TAG_SIZE; 122 123 PMD_INIT_FUNC_TRACE(); 124 125 if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN) 126 return -EINVAL; 127 if (frame_size > ETHER_MAX_LEN) 128 dev->data->dev_conf.rxmode.offloads &= 129 DEV_RX_OFFLOAD_JUMBO_FRAME; 130 else 131 dev->data->dev_conf.rxmode.offloads &= 132 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 133 134 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 135 136 fman_if_set_maxfrm(dpaa_intf->fif, frame_size); 137 138 return 0; 139 } 140 141 static int 142 dpaa_eth_dev_configure(struct rte_eth_dev *dev) 143 { 144 struct dpaa_if *dpaa_intf = dev->data->dev_private; 145 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 146 struct rte_eth_dev_info dev_info; 147 uint64_t rx_offloads = eth_conf->rxmode.offloads; 148 uint64_t tx_offloads = eth_conf->txmode.offloads; 149 150 PMD_INIT_FUNC_TRACE(); 151 152 dpaa_eth_dev_info(dev, &dev_info); 153 if (((~(dev_info.rx_offload_capa) & rx_offloads) != 0)) { 154 DPAA_PMD_ERR("Some Rx offloads are not supported " 155 "requested 0x%" PRIx64 " supported 0x%" PRIx64, 156 rx_offloads, dev_info.rx_offload_capa); 157 return -ENOTSUP; 158 } 159 160 if (((~(dev_info.tx_offload_capa) & tx_offloads) != 0)) { 161 DPAA_PMD_ERR("Some Tx offloads are not supported " 162 "requested 0x%" PRIx64 " supported 0x%" PRIx64, 163 tx_offloads, dev_info.tx_offload_capa); 164 return -ENOTSUP; 165 } 166 167 if (((rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0) || 168 ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) == 0) || 169 ((rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) == 0) || 170 ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) || 171 ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) || 172 ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0)) { 173 DPAA_PMD_ERR(" Cksum offloading is enabled by default " 174 " Cannot be disabled. So ignoring this configuration "); 175 } 176 177 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 178 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= 179 DPAA_MAX_RX_PKT_LEN) { 180 fman_if_set_maxfrm(dpaa_intf->fif, 181 dev->data->dev_conf.rxmode.max_rx_pkt_len); 182 return 0; 183 } else { 184 return -1; 185 } 186 } 187 return 0; 188 } 189 190 static const uint32_t * 191 dpaa_supported_ptypes_get(struct rte_eth_dev *dev) 192 { 193 static const uint32_t ptypes[] = { 194 /*todo -= add more types */ 195 RTE_PTYPE_L2_ETHER, 196 RTE_PTYPE_L3_IPV4, 197 RTE_PTYPE_L3_IPV4_EXT, 198 RTE_PTYPE_L3_IPV6, 199 RTE_PTYPE_L3_IPV6_EXT, 200 RTE_PTYPE_L4_TCP, 201 RTE_PTYPE_L4_UDP, 202 RTE_PTYPE_L4_SCTP 203 }; 204 205 PMD_INIT_FUNC_TRACE(); 206 207 if (dev->rx_pkt_burst == dpaa_eth_queue_rx) 208 return ptypes; 209 return NULL; 210 } 211 212 static int dpaa_eth_dev_start(struct rte_eth_dev *dev) 213 { 214 struct dpaa_if *dpaa_intf = dev->data->dev_private; 215 216 PMD_INIT_FUNC_TRACE(); 217 218 /* Change tx callback to the real one */ 219 dev->tx_pkt_burst = dpaa_eth_queue_tx; 220 fman_if_enable_rx(dpaa_intf->fif); 221 222 return 0; 223 } 224 225 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev) 226 { 227 struct dpaa_if *dpaa_intf = dev->data->dev_private; 228 229 PMD_INIT_FUNC_TRACE(); 230 231 fman_if_disable_rx(dpaa_intf->fif); 232 dev->tx_pkt_burst = dpaa_eth_tx_drop_all; 233 } 234 235 static void dpaa_eth_dev_close(struct rte_eth_dev *dev) 236 { 237 PMD_INIT_FUNC_TRACE(); 238 239 dpaa_eth_dev_stop(dev); 240 } 241 242 static int 243 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused, 244 char *fw_version, 245 size_t fw_size) 246 { 247 int ret; 248 FILE *svr_file = NULL; 249 unsigned int svr_ver = 0; 250 251 PMD_INIT_FUNC_TRACE(); 252 253 svr_file = fopen(DPAA_SOC_ID_FILE, "r"); 254 if (!svr_file) { 255 DPAA_PMD_ERR("Unable to open SoC device"); 256 return -ENOTSUP; /* Not supported on this infra */ 257 } 258 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) 259 dpaa_svr_family = svr_ver & SVR_MASK; 260 else 261 DPAA_PMD_ERR("Unable to read SoC device"); 262 263 fclose(svr_file); 264 265 ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x", 266 svr_ver, fman_ip_rev); 267 ret += 1; /* add the size of '\0' */ 268 269 if (fw_size < (uint32_t)ret) 270 return ret; 271 else 272 return 0; 273 } 274 275 static void dpaa_eth_dev_info(struct rte_eth_dev *dev, 276 struct rte_eth_dev_info *dev_info) 277 { 278 struct dpaa_if *dpaa_intf = dev->data->dev_private; 279 280 PMD_INIT_FUNC_TRACE(); 281 282 dev_info->max_rx_queues = dpaa_intf->nb_rx_queues; 283 dev_info->max_tx_queues = dpaa_intf->nb_tx_queues; 284 dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE; 285 dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; 286 dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER; 287 dev_info->max_hash_mac_addrs = 0; 288 dev_info->max_vfs = 0; 289 dev_info->max_vmdq_pools = ETH_16_POOLS; 290 dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL; 291 dev_info->speed_capa = (ETH_LINK_SPEED_1G | 292 ETH_LINK_SPEED_10G); 293 dev_info->rx_offload_capa = 294 (DEV_RX_OFFLOAD_IPV4_CKSUM | 295 DEV_RX_OFFLOAD_UDP_CKSUM | 296 DEV_RX_OFFLOAD_TCP_CKSUM) | 297 DEV_RX_OFFLOAD_JUMBO_FRAME | 298 DEV_RX_OFFLOAD_SCATTER; 299 dev_info->tx_offload_capa = 300 (DEV_TX_OFFLOAD_IPV4_CKSUM | 301 DEV_TX_OFFLOAD_UDP_CKSUM | 302 DEV_TX_OFFLOAD_TCP_CKSUM) | 303 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 304 DEV_TX_OFFLOAD_MULTI_SEGS; 305 } 306 307 static int dpaa_eth_link_update(struct rte_eth_dev *dev, 308 int wait_to_complete __rte_unused) 309 { 310 struct dpaa_if *dpaa_intf = dev->data->dev_private; 311 struct rte_eth_link *link = &dev->data->dev_link; 312 313 PMD_INIT_FUNC_TRACE(); 314 315 if (dpaa_intf->fif->mac_type == fman_mac_1g) 316 link->link_speed = ETH_SPEED_NUM_1G; 317 else if (dpaa_intf->fif->mac_type == fman_mac_10g) 318 link->link_speed = ETH_SPEED_NUM_10G; 319 else 320 DPAA_PMD_ERR("invalid link_speed: %s, %d", 321 dpaa_intf->name, dpaa_intf->fif->mac_type); 322 323 link->link_status = dpaa_intf->valid; 324 link->link_duplex = ETH_LINK_FULL_DUPLEX; 325 link->link_autoneg = ETH_LINK_AUTONEG; 326 return 0; 327 } 328 329 static int dpaa_eth_stats_get(struct rte_eth_dev *dev, 330 struct rte_eth_stats *stats) 331 { 332 struct dpaa_if *dpaa_intf = dev->data->dev_private; 333 334 PMD_INIT_FUNC_TRACE(); 335 336 fman_if_stats_get(dpaa_intf->fif, stats); 337 return 0; 338 } 339 340 static void dpaa_eth_stats_reset(struct rte_eth_dev *dev) 341 { 342 struct dpaa_if *dpaa_intf = dev->data->dev_private; 343 344 PMD_INIT_FUNC_TRACE(); 345 346 fman_if_stats_reset(dpaa_intf->fif); 347 } 348 349 static int 350 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 351 unsigned int n) 352 { 353 struct dpaa_if *dpaa_intf = dev->data->dev_private; 354 unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); 355 uint64_t values[sizeof(struct dpaa_if_stats) / 8]; 356 357 if (xstats == NULL) 358 return 0; 359 360 if (n < num) 361 return num; 362 363 fman_if_stats_get_all(dpaa_intf->fif, values, 364 sizeof(struct dpaa_if_stats) / 8); 365 366 for (i = 0; i < num; i++) { 367 xstats[i].id = i; 368 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8]; 369 } 370 return i; 371 } 372 373 static int 374 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 375 struct rte_eth_xstat_name *xstats_names, 376 unsigned int limit) 377 { 378 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 379 380 if (limit < stat_cnt) 381 return stat_cnt; 382 383 if (xstats_names != NULL) 384 for (i = 0; i < stat_cnt; i++) 385 snprintf(xstats_names[i].name, 386 sizeof(xstats_names[i].name), 387 "%s", 388 dpaa_xstats_strings[i].name); 389 390 return stat_cnt; 391 } 392 393 static int 394 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 395 uint64_t *values, unsigned int n) 396 { 397 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 398 uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8]; 399 400 if (!ids) { 401 struct dpaa_if *dpaa_intf = dev->data->dev_private; 402 403 if (n < stat_cnt) 404 return stat_cnt; 405 406 if (!values) 407 return 0; 408 409 fman_if_stats_get_all(dpaa_intf->fif, values_copy, 410 sizeof(struct dpaa_if_stats) / 8); 411 412 for (i = 0; i < stat_cnt; i++) 413 values[i] = 414 values_copy[dpaa_xstats_strings[i].offset / 8]; 415 416 return stat_cnt; 417 } 418 419 dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 420 421 for (i = 0; i < n; i++) { 422 if (ids[i] >= stat_cnt) { 423 DPAA_PMD_ERR("id value isn't valid"); 424 return -1; 425 } 426 values[i] = values_copy[ids[i]]; 427 } 428 return n; 429 } 430 431 static int 432 dpaa_xstats_get_names_by_id( 433 struct rte_eth_dev *dev, 434 struct rte_eth_xstat_name *xstats_names, 435 const uint64_t *ids, 436 unsigned int limit) 437 { 438 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); 439 struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 440 441 if (!ids) 442 return dpaa_xstats_get_names(dev, xstats_names, limit); 443 444 dpaa_xstats_get_names(dev, xstats_names_copy, limit); 445 446 for (i = 0; i < limit; i++) { 447 if (ids[i] >= stat_cnt) { 448 DPAA_PMD_ERR("id value isn't valid"); 449 return -1; 450 } 451 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 452 } 453 return limit; 454 } 455 456 static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev) 457 { 458 struct dpaa_if *dpaa_intf = dev->data->dev_private; 459 460 PMD_INIT_FUNC_TRACE(); 461 462 fman_if_promiscuous_enable(dpaa_intf->fif); 463 } 464 465 static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev) 466 { 467 struct dpaa_if *dpaa_intf = dev->data->dev_private; 468 469 PMD_INIT_FUNC_TRACE(); 470 471 fman_if_promiscuous_disable(dpaa_intf->fif); 472 } 473 474 static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev) 475 { 476 struct dpaa_if *dpaa_intf = dev->data->dev_private; 477 478 PMD_INIT_FUNC_TRACE(); 479 480 fman_if_set_mcast_filter_table(dpaa_intf->fif); 481 } 482 483 static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev) 484 { 485 struct dpaa_if *dpaa_intf = dev->data->dev_private; 486 487 PMD_INIT_FUNC_TRACE(); 488 489 fman_if_reset_mcast_filter_table(dpaa_intf->fif); 490 } 491 492 static 493 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 494 uint16_t nb_desc, 495 unsigned int socket_id __rte_unused, 496 const struct rte_eth_rxconf *rx_conf __rte_unused, 497 struct rte_mempool *mp) 498 { 499 struct dpaa_if *dpaa_intf = dev->data->dev_private; 500 struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx]; 501 struct qm_mcc_initfq opts = {0}; 502 u32 flags = 0; 503 int ret; 504 505 PMD_INIT_FUNC_TRACE(); 506 507 DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx); 508 509 if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) { 510 struct fman_if_ic_params icp; 511 uint32_t fd_offset; 512 uint32_t bp_size; 513 514 if (!mp->pool_data) { 515 DPAA_PMD_ERR("Not an offloaded buffer pool!"); 516 return -1; 517 } 518 dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 519 520 memset(&icp, 0, sizeof(icp)); 521 /* set ICEOF for to the default value , which is 0*/ 522 icp.iciof = DEFAULT_ICIOF; 523 icp.iceof = DEFAULT_RX_ICEOF; 524 icp.icsz = DEFAULT_ICSZ; 525 fman_if_set_ic_params(dpaa_intf->fif, &icp); 526 527 fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE; 528 fman_if_set_fdoff(dpaa_intf->fif, fd_offset); 529 530 /* Buffer pool size should be equal to Dataroom Size*/ 531 bp_size = rte_pktmbuf_data_room_size(mp); 532 fman_if_set_bp(dpaa_intf->fif, mp->size, 533 dpaa_intf->bp_info->bpid, bp_size); 534 dpaa_intf->valid = 1; 535 DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d", 536 dpaa_intf->name, fd_offset, 537 fman_if_get_fdoff(dpaa_intf->fif)); 538 } 539 /* checking if push mode only, no error check for now */ 540 if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) { 541 dpaa_push_queue_idx++; 542 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; 543 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | 544 QM_FQCTRL_CTXASTASHING | 545 QM_FQCTRL_PREFERINCACHE; 546 opts.fqd.context_a.stashing.exclusive = 0; 547 /* In muticore scenario stashing becomes a bottleneck on LS1046. 548 * So do not enable stashing in this case 549 */ 550 if (dpaa_svr_family != SVR_LS1046A_FAMILY) 551 opts.fqd.context_a.stashing.annotation_cl = 552 DPAA_IF_RX_ANNOTATION_STASH; 553 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; 554 opts.fqd.context_a.stashing.context_cl = 555 DPAA_IF_RX_CONTEXT_STASH; 556 557 /*Create a channel and associate given queue with the channel*/ 558 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0); 559 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; 560 opts.fqd.dest.channel = rxq->ch_id; 561 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; 562 flags = QMAN_INITFQ_FLAG_SCHED; 563 564 /* Configure tail drop */ 565 if (dpaa_intf->cgr_rx) { 566 opts.we_mask |= QM_INITFQ_WE_CGID; 567 opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid; 568 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 569 } 570 ret = qman_init_fq(rxq, flags, &opts); 571 if (ret) 572 DPAA_PMD_ERR("Channel/Queue association failed. fqid %d" 573 " ret: %d", rxq->fqid, ret); 574 rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; 575 rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; 576 rxq->is_static = true; 577 } 578 dev->data->rx_queues[queue_idx] = rxq; 579 580 /* configure the CGR size as per the desc size */ 581 if (dpaa_intf->cgr_rx) { 582 struct qm_mcc_initcgr cgr_opts = {0}; 583 584 /* Enable tail drop with cgr on this queue */ 585 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0); 586 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts); 587 if (ret) { 588 DPAA_PMD_WARN( 589 "rx taildrop modify fail on fqid %d (ret=%d)", 590 rxq->fqid, ret); 591 } 592 } 593 594 return 0; 595 } 596 597 int __rte_experimental 598 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, 599 int eth_rx_queue_id, 600 u16 ch_id, 601 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 602 { 603 int ret; 604 u32 flags = 0; 605 struct dpaa_if *dpaa_intf = dev->data->dev_private; 606 struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; 607 struct qm_mcc_initfq opts = {0}; 608 609 if (dpaa_push_mode_max_queue) 610 DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n" 611 "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n", 612 dpaa_push_mode_max_queue); 613 614 dpaa_poll_queue_default_config(&opts); 615 616 switch (queue_conf->ev.sched_type) { 617 case RTE_SCHED_TYPE_ATOMIC: 618 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 619 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 620 * configuration with HOLD_ACTIVE setting 621 */ 622 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 623 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic; 624 break; 625 case RTE_SCHED_TYPE_ORDERED: 626 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n"); 627 return -1; 628 default: 629 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 630 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel; 631 break; 632 } 633 634 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; 635 opts.fqd.dest.channel = ch_id; 636 opts.fqd.dest.wq = queue_conf->ev.priority; 637 638 if (dpaa_intf->cgr_rx) { 639 opts.we_mask |= QM_INITFQ_WE_CGID; 640 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; 641 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 642 } 643 644 flags = QMAN_INITFQ_FLAG_SCHED; 645 646 ret = qman_init_fq(rxq, flags, &opts); 647 if (ret) { 648 DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d", 649 rxq->fqid, ret); 650 return ret; 651 } 652 653 /* copy configuration which needs to be filled during dequeue */ 654 memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event)); 655 dev->data->rx_queues[eth_rx_queue_id] = rxq; 656 657 return ret; 658 } 659 660 int __rte_experimental 661 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, 662 int eth_rx_queue_id) 663 { 664 struct qm_mcc_initfq opts; 665 int ret; 666 u32 flags = 0; 667 struct dpaa_if *dpaa_intf = dev->data->dev_private; 668 struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; 669 670 dpaa_poll_queue_default_config(&opts); 671 672 if (dpaa_intf->cgr_rx) { 673 opts.we_mask |= QM_INITFQ_WE_CGID; 674 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; 675 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 676 } 677 678 ret = qman_init_fq(rxq, flags, &opts); 679 if (ret) { 680 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", 681 rxq->fqid, ret); 682 } 683 684 rxq->cb.dqrr_dpdk_cb = NULL; 685 dev->data->rx_queues[eth_rx_queue_id] = NULL; 686 687 return 0; 688 } 689 690 static 691 void dpaa_eth_rx_queue_release(void *rxq __rte_unused) 692 { 693 PMD_INIT_FUNC_TRACE(); 694 } 695 696 static 697 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 698 uint16_t nb_desc __rte_unused, 699 unsigned int socket_id __rte_unused, 700 const struct rte_eth_txconf *tx_conf __rte_unused) 701 { 702 struct dpaa_if *dpaa_intf = dev->data->dev_private; 703 704 PMD_INIT_FUNC_TRACE(); 705 706 DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx); 707 dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx]; 708 return 0; 709 } 710 711 static void dpaa_eth_tx_queue_release(void *txq __rte_unused) 712 { 713 PMD_INIT_FUNC_TRACE(); 714 } 715 716 static uint32_t 717 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 718 { 719 struct dpaa_if *dpaa_intf = dev->data->dev_private; 720 struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id]; 721 u32 frm_cnt = 0; 722 723 PMD_INIT_FUNC_TRACE(); 724 725 if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) { 726 RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n", 727 rx_queue_id, frm_cnt); 728 } 729 return frm_cnt; 730 } 731 732 static int dpaa_link_down(struct rte_eth_dev *dev) 733 { 734 PMD_INIT_FUNC_TRACE(); 735 736 dpaa_eth_dev_stop(dev); 737 return 0; 738 } 739 740 static int dpaa_link_up(struct rte_eth_dev *dev) 741 { 742 PMD_INIT_FUNC_TRACE(); 743 744 dpaa_eth_dev_start(dev); 745 return 0; 746 } 747 748 static int 749 dpaa_flow_ctrl_set(struct rte_eth_dev *dev, 750 struct rte_eth_fc_conf *fc_conf) 751 { 752 struct dpaa_if *dpaa_intf = dev->data->dev_private; 753 struct rte_eth_fc_conf *net_fc; 754 755 PMD_INIT_FUNC_TRACE(); 756 757 if (!(dpaa_intf->fc_conf)) { 758 dpaa_intf->fc_conf = rte_zmalloc(NULL, 759 sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 760 if (!dpaa_intf->fc_conf) { 761 DPAA_PMD_ERR("unable to save flow control info"); 762 return -ENOMEM; 763 } 764 } 765 net_fc = dpaa_intf->fc_conf; 766 767 if (fc_conf->high_water < fc_conf->low_water) { 768 DPAA_PMD_ERR("Incorrect Flow Control Configuration"); 769 return -EINVAL; 770 } 771 772 if (fc_conf->mode == RTE_FC_NONE) { 773 return 0; 774 } else if (fc_conf->mode == RTE_FC_TX_PAUSE || 775 fc_conf->mode == RTE_FC_FULL) { 776 fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water, 777 fc_conf->low_water, 778 dpaa_intf->bp_info->bpid); 779 if (fc_conf->pause_time) 780 fman_if_set_fc_quanta(dpaa_intf->fif, 781 fc_conf->pause_time); 782 } 783 784 /* Save the information in dpaa device */ 785 net_fc->pause_time = fc_conf->pause_time; 786 net_fc->high_water = fc_conf->high_water; 787 net_fc->low_water = fc_conf->low_water; 788 net_fc->send_xon = fc_conf->send_xon; 789 net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 790 net_fc->mode = fc_conf->mode; 791 net_fc->autoneg = fc_conf->autoneg; 792 793 return 0; 794 } 795 796 static int 797 dpaa_flow_ctrl_get(struct rte_eth_dev *dev, 798 struct rte_eth_fc_conf *fc_conf) 799 { 800 struct dpaa_if *dpaa_intf = dev->data->dev_private; 801 struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf; 802 int ret; 803 804 PMD_INIT_FUNC_TRACE(); 805 806 if (net_fc) { 807 fc_conf->pause_time = net_fc->pause_time; 808 fc_conf->high_water = net_fc->high_water; 809 fc_conf->low_water = net_fc->low_water; 810 fc_conf->send_xon = net_fc->send_xon; 811 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd; 812 fc_conf->mode = net_fc->mode; 813 fc_conf->autoneg = net_fc->autoneg; 814 return 0; 815 } 816 ret = fman_if_get_fc_threshold(dpaa_intf->fif); 817 if (ret) { 818 fc_conf->mode = RTE_FC_TX_PAUSE; 819 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif); 820 } else { 821 fc_conf->mode = RTE_FC_NONE; 822 } 823 824 return 0; 825 } 826 827 static int 828 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, 829 struct ether_addr *addr, 830 uint32_t index, 831 __rte_unused uint32_t pool) 832 { 833 int ret; 834 struct dpaa_if *dpaa_intf = dev->data->dev_private; 835 836 PMD_INIT_FUNC_TRACE(); 837 838 ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index); 839 840 if (ret) 841 RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:" 842 " err = %d", ret); 843 return 0; 844 } 845 846 static void 847 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, 848 uint32_t index) 849 { 850 struct dpaa_if *dpaa_intf = dev->data->dev_private; 851 852 PMD_INIT_FUNC_TRACE(); 853 854 fman_if_clear_mac_addr(dpaa_intf->fif, index); 855 } 856 857 static int 858 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, 859 struct ether_addr *addr) 860 { 861 int ret; 862 struct dpaa_if *dpaa_intf = dev->data->dev_private; 863 864 PMD_INIT_FUNC_TRACE(); 865 866 ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0); 867 if (ret) 868 RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret); 869 870 return ret; 871 } 872 873 static struct eth_dev_ops dpaa_devops = { 874 .dev_configure = dpaa_eth_dev_configure, 875 .dev_start = dpaa_eth_dev_start, 876 .dev_stop = dpaa_eth_dev_stop, 877 .dev_close = dpaa_eth_dev_close, 878 .dev_infos_get = dpaa_eth_dev_info, 879 .dev_supported_ptypes_get = dpaa_supported_ptypes_get, 880 881 .rx_queue_setup = dpaa_eth_rx_queue_setup, 882 .tx_queue_setup = dpaa_eth_tx_queue_setup, 883 .rx_queue_release = dpaa_eth_rx_queue_release, 884 .tx_queue_release = dpaa_eth_tx_queue_release, 885 .rx_queue_count = dpaa_dev_rx_queue_count, 886 887 .flow_ctrl_get = dpaa_flow_ctrl_get, 888 .flow_ctrl_set = dpaa_flow_ctrl_set, 889 890 .link_update = dpaa_eth_link_update, 891 .stats_get = dpaa_eth_stats_get, 892 .xstats_get = dpaa_dev_xstats_get, 893 .xstats_get_by_id = dpaa_xstats_get_by_id, 894 .xstats_get_names_by_id = dpaa_xstats_get_names_by_id, 895 .xstats_get_names = dpaa_xstats_get_names, 896 .xstats_reset = dpaa_eth_stats_reset, 897 .stats_reset = dpaa_eth_stats_reset, 898 .promiscuous_enable = dpaa_eth_promiscuous_enable, 899 .promiscuous_disable = dpaa_eth_promiscuous_disable, 900 .allmulticast_enable = dpaa_eth_multicast_enable, 901 .allmulticast_disable = dpaa_eth_multicast_disable, 902 .mtu_set = dpaa_mtu_set, 903 .dev_set_link_down = dpaa_link_down, 904 .dev_set_link_up = dpaa_link_up, 905 .mac_addr_add = dpaa_dev_add_mac_addr, 906 .mac_addr_remove = dpaa_dev_remove_mac_addr, 907 .mac_addr_set = dpaa_dev_set_mac_addr, 908 909 .fw_version_get = dpaa_fw_version_get, 910 }; 911 912 static bool 913 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv) 914 { 915 if (strcmp(dev->device->driver->name, 916 drv->driver.name)) 917 return false; 918 919 return true; 920 } 921 922 static bool 923 is_dpaa_supported(struct rte_eth_dev *dev) 924 { 925 return is_device_supported(dev, &rte_dpaa_pmd); 926 } 927 928 int __rte_experimental 929 rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) 930 { 931 struct rte_eth_dev *dev; 932 struct dpaa_if *dpaa_intf; 933 934 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); 935 936 dev = &rte_eth_devices[port]; 937 938 if (!is_dpaa_supported(dev)) 939 return -ENOTSUP; 940 941 dpaa_intf = dev->data->dev_private; 942 943 if (on) 944 fman_if_loopback_enable(dpaa_intf->fif); 945 else 946 fman_if_loopback_disable(dpaa_intf->fif); 947 948 return 0; 949 } 950 951 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf) 952 { 953 struct rte_eth_fc_conf *fc_conf; 954 int ret; 955 956 PMD_INIT_FUNC_TRACE(); 957 958 if (!(dpaa_intf->fc_conf)) { 959 dpaa_intf->fc_conf = rte_zmalloc(NULL, 960 sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); 961 if (!dpaa_intf->fc_conf) { 962 DPAA_PMD_ERR("unable to save flow control info"); 963 return -ENOMEM; 964 } 965 } 966 fc_conf = dpaa_intf->fc_conf; 967 ret = fman_if_get_fc_threshold(dpaa_intf->fif); 968 if (ret) { 969 fc_conf->mode = RTE_FC_TX_PAUSE; 970 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif); 971 } else { 972 fc_conf->mode = RTE_FC_NONE; 973 } 974 975 return 0; 976 } 977 978 /* Initialise an Rx FQ */ 979 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, 980 uint32_t fqid) 981 { 982 struct qm_mcc_initfq opts = {0}; 983 int ret; 984 u32 flags = 0; 985 struct qm_mcc_initcgr cgr_opts = { 986 .we_mask = QM_CGR_WE_CS_THRES | 987 QM_CGR_WE_CSTD_EN | 988 QM_CGR_WE_MODE, 989 .cgr = { 990 .cstd_en = QM_CGR_EN, 991 .mode = QMAN_CGR_MODE_FRAME 992 } 993 }; 994 995 PMD_INIT_FUNC_TRACE(); 996 997 ret = qman_reserve_fqid(fqid); 998 if (ret) { 999 DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d", 1000 fqid, ret); 1001 return -EINVAL; 1002 } 1003 1004 DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid); 1005 ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); 1006 if (ret) { 1007 DPAA_PMD_ERR("create rx fqid %d failed with ret: %d", 1008 fqid, ret); 1009 return ret; 1010 } 1011 fq->is_static = false; 1012 1013 dpaa_poll_queue_default_config(&opts); 1014 1015 if (cgr_rx) { 1016 /* Enable tail drop with cgr on this queue */ 1017 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0); 1018 cgr_rx->cb = NULL; 1019 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT, 1020 &cgr_opts); 1021 if (ret) { 1022 DPAA_PMD_WARN( 1023 "rx taildrop init fail on rx fqid %d (ret=%d)", 1024 fqid, ret); 1025 goto without_cgr; 1026 } 1027 opts.we_mask |= QM_INITFQ_WE_CGID; 1028 opts.fqd.cgid = cgr_rx->cgrid; 1029 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; 1030 } 1031 without_cgr: 1032 ret = qman_init_fq(fq, flags, &opts); 1033 if (ret) 1034 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret); 1035 return ret; 1036 } 1037 1038 /* Initialise a Tx FQ */ 1039 static int dpaa_tx_queue_init(struct qman_fq *fq, 1040 struct fman_if *fman_intf) 1041 { 1042 struct qm_mcc_initfq opts = {0}; 1043 int ret; 1044 1045 PMD_INIT_FUNC_TRACE(); 1046 1047 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | 1048 QMAN_FQ_FLAG_TO_DCPORTAL, fq); 1049 if (ret) { 1050 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret); 1051 return ret; 1052 } 1053 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 1054 QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA; 1055 opts.fqd.dest.channel = fman_intf->tx_channel_id; 1056 opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY; 1057 opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; 1058 opts.fqd.context_b = 0; 1059 /* no tx-confirmation */ 1060 opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; 1061 opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; 1062 DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid); 1063 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); 1064 if (ret) 1065 DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret); 1066 return ret; 1067 } 1068 1069 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 1070 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */ 1071 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) 1072 { 1073 struct qm_mcc_initfq opts = {0}; 1074 int ret; 1075 1076 PMD_INIT_FUNC_TRACE(); 1077 1078 ret = qman_reserve_fqid(fqid); 1079 if (ret) { 1080 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d", 1081 fqid, ret); 1082 return -EINVAL; 1083 } 1084 /* "map" this Rx FQ to one of the interfaces Tx FQID */ 1085 DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid); 1086 ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); 1087 if (ret) { 1088 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d", 1089 fqid, ret); 1090 return ret; 1091 } 1092 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; 1093 opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY; 1094 ret = qman_init_fq(fq, 0, &opts); 1095 if (ret) 1096 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d", 1097 fqid, ret); 1098 return ret; 1099 } 1100 #endif 1101 1102 /* Initialise a network interface */ 1103 static int 1104 dpaa_dev_init(struct rte_eth_dev *eth_dev) 1105 { 1106 int num_cores, num_rx_fqs, fqid; 1107 int loop, ret = 0; 1108 int dev_id; 1109 struct rte_dpaa_device *dpaa_device; 1110 struct dpaa_if *dpaa_intf; 1111 struct fm_eth_port_cfg *cfg; 1112 struct fman_if *fman_intf; 1113 struct fman_if_bpool *bp, *tmp_bp; 1114 uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES]; 1115 1116 PMD_INIT_FUNC_TRACE(); 1117 1118 /* For secondary processes, the primary has done all the work */ 1119 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1120 return 0; 1121 1122 dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); 1123 dev_id = dpaa_device->id.dev_id; 1124 dpaa_intf = eth_dev->data->dev_private; 1125 cfg = &dpaa_netcfg->port_cfg[dev_id]; 1126 fman_intf = cfg->fman_if; 1127 1128 dpaa_intf->name = dpaa_device->name; 1129 1130 /* save fman_if & cfg in the interface struture */ 1131 dpaa_intf->fif = fman_intf; 1132 dpaa_intf->ifid = dev_id; 1133 dpaa_intf->cfg = cfg; 1134 1135 /* Initialize Rx FQ's */ 1136 if (getenv("DPAA_NUM_RX_QUEUES")) 1137 num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES")); 1138 else 1139 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; 1140 1141 /* if push mode queues to be enabled. Currenly we are allowing only 1142 * one queue per thread. 1143 */ 1144 if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { 1145 dpaa_push_mode_max_queue = 1146 atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); 1147 if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) 1148 dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; 1149 } 1150 1151 /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX 1152 * queues. 1153 */ 1154 if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { 1155 DPAA_PMD_ERR("Invalid number of RX queues\n"); 1156 return -EINVAL; 1157 } 1158 1159 dpaa_intf->rx_queues = rte_zmalloc(NULL, 1160 sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE); 1161 if (!dpaa_intf->rx_queues) { 1162 DPAA_PMD_ERR("Failed to alloc mem for RX queues\n"); 1163 return -ENOMEM; 1164 } 1165 1166 /* If congestion control is enabled globally*/ 1167 if (td_threshold) { 1168 dpaa_intf->cgr_rx = rte_zmalloc(NULL, 1169 sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE); 1170 if (!dpaa_intf->cgr_rx) { 1171 DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n"); 1172 ret = -ENOMEM; 1173 goto free_rx; 1174 } 1175 1176 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0); 1177 if (ret != num_rx_fqs) { 1178 DPAA_PMD_WARN("insufficient CGRIDs available"); 1179 ret = -EINVAL; 1180 goto free_rx; 1181 } 1182 } else { 1183 dpaa_intf->cgr_rx = NULL; 1184 } 1185 1186 for (loop = 0; loop < num_rx_fqs; loop++) { 1187 fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid * 1188 DPAA_PCD_FQID_MULTIPLIER + loop; 1189 1190 if (dpaa_intf->cgr_rx) 1191 dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; 1192 1193 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], 1194 dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL, 1195 fqid); 1196 if (ret) 1197 goto free_rx; 1198 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf; 1199 } 1200 dpaa_intf->nb_rx_queues = num_rx_fqs; 1201 1202 /* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */ 1203 num_cores = rte_lcore_count(); 1204 dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) * 1205 num_cores, MAX_CACHELINE); 1206 if (!dpaa_intf->tx_queues) { 1207 DPAA_PMD_ERR("Failed to alloc mem for TX queues\n"); 1208 ret = -ENOMEM; 1209 goto free_rx; 1210 } 1211 1212 for (loop = 0; loop < num_cores; loop++) { 1213 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop], 1214 fman_intf); 1215 if (ret) 1216 goto free_tx; 1217 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf; 1218 } 1219 dpaa_intf->nb_tx_queues = num_cores; 1220 1221 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 1222 dpaa_debug_queue_init(&dpaa_intf->debug_queues[ 1223 DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err); 1224 dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf; 1225 dpaa_debug_queue_init(&dpaa_intf->debug_queues[ 1226 DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err); 1227 dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf; 1228 #endif 1229 1230 DPAA_PMD_DEBUG("All frame queues created"); 1231 1232 /* Get the initial configuration for flow control */ 1233 dpaa_fc_set_default(dpaa_intf); 1234 1235 /* reset bpool list, initialize bpool dynamically */ 1236 list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) { 1237 list_del(&bp->node); 1238 free(bp); 1239 } 1240 1241 /* Populate ethdev structure */ 1242 eth_dev->dev_ops = &dpaa_devops; 1243 eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; 1244 eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all; 1245 1246 /* Allocate memory for storing MAC addresses */ 1247 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 1248 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0); 1249 if (eth_dev->data->mac_addrs == NULL) { 1250 DPAA_PMD_ERR("Failed to allocate %d bytes needed to " 1251 "store MAC addresses", 1252 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER); 1253 ret = -ENOMEM; 1254 goto free_tx; 1255 } 1256 1257 /* copy the primary mac address */ 1258 ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]); 1259 1260 RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n", 1261 dpaa_device->name, 1262 fman_intf->mac_addr.addr_bytes[0], 1263 fman_intf->mac_addr.addr_bytes[1], 1264 fman_intf->mac_addr.addr_bytes[2], 1265 fman_intf->mac_addr.addr_bytes[3], 1266 fman_intf->mac_addr.addr_bytes[4], 1267 fman_intf->mac_addr.addr_bytes[5]); 1268 1269 /* Disable RX mode */ 1270 fman_if_discard_rx_errors(fman_intf); 1271 fman_if_disable_rx(fman_intf); 1272 /* Disable promiscuous mode */ 1273 fman_if_promiscuous_disable(fman_intf); 1274 /* Disable multicast */ 1275 fman_if_reset_mcast_filter_table(fman_intf); 1276 /* Reset interface statistics */ 1277 fman_if_stats_reset(fman_intf); 1278 1279 return 0; 1280 1281 free_tx: 1282 rte_free(dpaa_intf->tx_queues); 1283 dpaa_intf->tx_queues = NULL; 1284 dpaa_intf->nb_tx_queues = 0; 1285 1286 free_rx: 1287 rte_free(dpaa_intf->cgr_rx); 1288 rte_free(dpaa_intf->rx_queues); 1289 dpaa_intf->rx_queues = NULL; 1290 dpaa_intf->nb_rx_queues = 0; 1291 return ret; 1292 } 1293 1294 static int 1295 dpaa_dev_uninit(struct rte_eth_dev *dev) 1296 { 1297 struct dpaa_if *dpaa_intf = dev->data->dev_private; 1298 int loop; 1299 1300 PMD_INIT_FUNC_TRACE(); 1301 1302 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1303 return -EPERM; 1304 1305 if (!dpaa_intf) { 1306 DPAA_PMD_WARN("Already closed or not started"); 1307 return -1; 1308 } 1309 1310 dpaa_eth_dev_close(dev); 1311 1312 /* release configuration memory */ 1313 if (dpaa_intf->fc_conf) 1314 rte_free(dpaa_intf->fc_conf); 1315 1316 /* Release RX congestion Groups */ 1317 if (dpaa_intf->cgr_rx) { 1318 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++) 1319 qman_delete_cgr(&dpaa_intf->cgr_rx[loop]); 1320 1321 qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid, 1322 dpaa_intf->nb_rx_queues); 1323 } 1324 1325 rte_free(dpaa_intf->cgr_rx); 1326 dpaa_intf->cgr_rx = NULL; 1327 1328 rte_free(dpaa_intf->rx_queues); 1329 dpaa_intf->rx_queues = NULL; 1330 1331 rte_free(dpaa_intf->tx_queues); 1332 dpaa_intf->tx_queues = NULL; 1333 1334 /* free memory for storing MAC addresses */ 1335 rte_free(dev->data->mac_addrs); 1336 dev->data->mac_addrs = NULL; 1337 1338 dev->dev_ops = NULL; 1339 dev->rx_pkt_burst = NULL; 1340 dev->tx_pkt_burst = NULL; 1341 1342 return 0; 1343 } 1344 1345 static int 1346 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, 1347 struct rte_dpaa_device *dpaa_dev) 1348 { 1349 int diag; 1350 int ret; 1351 struct rte_eth_dev *eth_dev; 1352 1353 PMD_INIT_FUNC_TRACE(); 1354 1355 /* In case of secondary process, the device is already configured 1356 * and no further action is required, except portal initialization 1357 * and verifying secondary attachment to port name. 1358 */ 1359 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1360 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); 1361 if (!eth_dev) 1362 return -ENOMEM; 1363 return 0; 1364 } 1365 1366 if (!is_global_init) { 1367 /* One time load of Qman/Bman drivers */ 1368 ret = qman_global_init(); 1369 if (ret) { 1370 DPAA_PMD_ERR("QMAN initialization failed: %d", 1371 ret); 1372 return ret; 1373 } 1374 ret = bman_global_init(); 1375 if (ret) { 1376 DPAA_PMD_ERR("BMAN initialization failed: %d", 1377 ret); 1378 return ret; 1379 } 1380 1381 is_global_init = 1; 1382 } 1383 1384 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 1385 ret = rte_dpaa_portal_init((void *)1); 1386 if (ret) { 1387 DPAA_PMD_ERR("Unable to initialize portal"); 1388 return ret; 1389 } 1390 } 1391 1392 eth_dev = rte_eth_dev_allocate(dpaa_dev->name); 1393 if (eth_dev == NULL) 1394 return -ENOMEM; 1395 1396 eth_dev->data->dev_private = rte_zmalloc( 1397 "ethdev private structure", 1398 sizeof(struct dpaa_if), 1399 RTE_CACHE_LINE_SIZE); 1400 if (!eth_dev->data->dev_private) { 1401 DPAA_PMD_ERR("Cannot allocate memzone for port data"); 1402 rte_eth_dev_release_port(eth_dev); 1403 return -ENOMEM; 1404 } 1405 1406 eth_dev->device = &dpaa_dev->device; 1407 eth_dev->device->driver = &dpaa_drv->driver; 1408 dpaa_dev->eth_dev = eth_dev; 1409 1410 /* Invoke PMD device initialization function */ 1411 diag = dpaa_dev_init(eth_dev); 1412 if (diag == 0) 1413 return 0; 1414 1415 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1416 rte_free(eth_dev->data->dev_private); 1417 1418 rte_eth_dev_release_port(eth_dev); 1419 return diag; 1420 } 1421 1422 static int 1423 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) 1424 { 1425 struct rte_eth_dev *eth_dev; 1426 1427 PMD_INIT_FUNC_TRACE(); 1428 1429 eth_dev = dpaa_dev->eth_dev; 1430 dpaa_dev_uninit(eth_dev); 1431 1432 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1433 rte_free(eth_dev->data->dev_private); 1434 1435 rte_eth_dev_release_port(eth_dev); 1436 1437 return 0; 1438 } 1439 1440 static struct rte_dpaa_driver rte_dpaa_pmd = { 1441 .drv_type = FSL_DPAA_ETH, 1442 .probe = rte_dpaa_probe, 1443 .remove = rte_dpaa_remove, 1444 }; 1445 1446 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd); 1447