/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
#include <rte_pmd_dpaa.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
/* At present we only allow up to 4 push mode queues - as each of these
 * queues needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE	4

static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Number of queues currently in push mode */

/* Per FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};

static struct rte_dpaa_driver rte_dpaa_pmd;

static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(struct qm_mcc_initfq));
	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			    QM_FQCTRL_PREFERINCACHE;
	opts->fqd.context_a.stashing.exclusive = 0;
	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
		opts->fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}

static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	fman_if_set_maxfrm(dpaa_intf->fif, frame_size);

	return 0;
}
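/*
 * Worked example (illustrative only, assuming the usual DPDK constants
 * ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4, VLAN_TAG_SIZE = 4 and
 * ETHER_MAX_LEN = 1518): an application call such as
 *
 *	rte_eth_dev_set_mtu(port_id, 1500);
 *
 * reaches dpaa_mtu_set() with mtu = 1500, so
 * frame_size = 1500 + 14 + 4 + 4 = 1522. Because the driver budgets for a
 * VLAN tag, this already exceeds ETHER_MAX_LEN and the jumbo_frame flag is
 * set before max_rx_pkt_len and the FMAN maximum frame length are updated.
 * "port_id" here is just an assumed valid DPAA port.
 */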
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN) {
			fman_if_set_maxfrm(dpaa_intf->fif,
				dev->data->dev_conf.rxmode.max_rx_pkt_len);
			return 0;
		} else {
			return -1;
		}
	}
	return 0;
}

static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP
	};

	PMD_INIT_FUNC_TRACE();

	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}

static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	dev->tx_pkt_burst = dpaa_eth_queue_tx;
	fman_if_enable_rx(dpaa_intf->fif);

	return 0;
}

static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(dpaa_intf->fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}

static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}

static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		    char *fw_version,
		    size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	ret += 1; /* add the size of '\0' */

	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G);
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM);
	dev_info->tx_offload_capa =
		(DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM);
}
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;

	PMD_INIT_FUNC_TRACE();

	if (dpaa_intf->fif->mac_type == fman_mac_1g)
		link->link_speed = 1000;
	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
		link->link_speed = 10000;
	else
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);

	link->link_status = dpaa_intf->valid;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = ETH_LINK_AUTONEG;
	return 0;
}

static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_get(dpaa_intf->fif, stats);
	return 0;
}

static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_reset(dpaa_intf->fif);
}

static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	fman_if_stats_get_all(dpaa_intf->fif, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}
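/*
 * Note on the xstats plumbing above: each entry of dpaa_xstats_strings
 * records a byte offset into struct dpaa_if_stats, and the hardware
 * counters are fetched as an array of 64-bit words, so "offset / 8"
 * selects the matching word. A minimal application-side sketch
 * (illustrative only; "port_id" is assumed to be a valid DPAA port):
 *
 *	struct rte_eth_xstat xstats[64];
 *	int i, n = rte_eth_xstats_get(port_id, xstats, RTE_DIM(xstats));
 *
 *	for (i = 0; i < n; i++)
 *		printf("xstat id %" PRIu64 " = %" PRIu64 "\n",
 *		       xstats[i].id, xstats[i].value);
 */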
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		struct dpaa_if *dpaa_intf = dev->data->dev_private;

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dpaa_intf->fif);
}

static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dpaa_intf->fif);
}

static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dpaa_intf->fif);
}

static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dpaa_intf->fif);
}

static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
	struct qm_mcc_initfq opts = {0};
	u32 flags = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);

	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to dataroom size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
			      dpaa_intf->name, fd_offset,
			      fman_if_get_fdoff(dpaa_intf->fif));
	}
	/* checking if push mode only, no error check for now */
	if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
		dpaa_push_queue_idx++;
		opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
		opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
				   QM_FQCTRL_CTXASTASHING |
				   QM_FQCTRL_PREFERINCACHE;
		opts.fqd.context_a.stashing.exclusive = 0;
		/* In a multicore scenario stashing becomes a bottleneck on
		 * LS1046, so do not enable stashing in this case.
		 */
		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
			opts.fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
		opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
		opts.fqd.context_a.stashing.context_cl =
						DPAA_IF_RX_CONTEXT_STASH;

		/* Create a channel and associate given queue with the channel */
		qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
		opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
		opts.fqd.dest.channel = rxq->ch_id;
		opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
		flags = QMAN_INITFQ_FLAG_SCHED;

		/* Configure tail drop */
		if (dpaa_intf->cgr_rx) {
			opts.we_mask |= QM_INITFQ_WE_CGID;
			opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
		}
		ret = qman_init_fq(rxq, flags, &opts);
		if (ret)
			DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
				     " ret: %d", rxq->fqid, ret);
		rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
		rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
		rxq->is_static = true;
	}
	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};

		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
		}
	}

	return 0;
}
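/*
 * For context (a sketch, not part of the driver): dpaa_eth_rx_queue_setup()
 * above is reached through the generic ethdev API. A typical application
 * call, assuming "port_id" is a DPAA port and "mb_pool" is a mempool backed
 * by the dpaa mempool driver, would look like:
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 128,
 *				     rte_eth_dev_socket_id(port_id),
 *				     NULL, mb_pool);
 *
 * The first queues configured this way (up to dpaa_push_mode_max_queue,
 * counted across all DPAA ports) are switched to push/static dequeue mode;
 * the remaining queues stay in poll mode.
 */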
int __rte_experimental
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
	struct qm_mcc_initfq opts = {0};

	if (dpaa_push_mode_max_queue)
		DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
			      "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
			      dpaa_push_mode_max_queue);

	dpaa_poll_queue_default_config(&opts);

	switch (queue_conf->ev.sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
		break;
	}

	opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
	opts.fqd.dest.channel = ch_id;
	opts.fqd.dest.wq = queue_conf->ev.priority;

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
			     rxq->fqid, ret);
		return ret;
	}

	/* copy configuration which needs to be filled during dequeue */
	memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
	dev->data->rx_queues[eth_rx_queue_id] = rxq;

	return ret;
}

int __rte_experimental
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct qm_mcc_initfq opts;
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

	dpaa_poll_queue_default_config(&opts);

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
			     rxq->fqid, ret);
	}

	rxq->cb.dqrr_dpdk_cb = NULL;
	dev->data->rx_queues[eth_rx_queue_id] = NULL;

	return 0;
}
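/*
 * Informational sketch: the two __rte_experimental hooks above are intended
 * to be called by the DPAA event device / Rx adapter glue rather than by
 * applications directly. From the application side the binding typically
 * goes through the generic adapter API, e.g. (assuming "adapter_id",
 * "port_id" and "queue_conf" have already been prepared):
 *
 *	ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
 *						 0, &queue_conf);
 *
 * which should eventually land in dpaa_eth_eventq_attach() with the chosen
 * channel id and scheduling type.
 */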
static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
	return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
	u32 frm_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
		RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
			rx_queue_id, frm_cnt);
	}
	return frm_cnt;
}

static int dpaa_link_down(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
	return 0;
}

static int dpaa_link_up(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_start(dev);
	return 0;
}
static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	net_fc = dpaa_intf->fc_conf;

	if (fc_conf->high_water < fc_conf->low_water) {
		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
		return -EINVAL;
	}

	if (fc_conf->mode == RTE_FC_NONE) {
		return 0;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
		   fc_conf->mode == RTE_FC_FULL) {
		fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
					 fc_conf->low_water,
					 dpaa_intf->bp_info->bpid);
		if (fc_conf->pause_time)
			fman_if_set_fc_quanta(dpaa_intf->fif,
					      fc_conf->pause_time);
	}

	/* Save the information in dpaa device */
	net_fc->pause_time = fc_conf->pause_time;
	net_fc->high_water = fc_conf->high_water;
	net_fc->low_water = fc_conf->low_water;
	net_fc->send_xon = fc_conf->send_xon;
	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
	net_fc->mode = fc_conf->mode;
	net_fc->autoneg = fc_conf->autoneg;

	return 0;
}

static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (net_fc) {
		fc_conf->pause_time = net_fc->pause_time;
		fc_conf->high_water = net_fc->high_water;
		fc_conf->low_water = net_fc->low_water;
		fc_conf->send_xon = net_fc->send_xon;
		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
		fc_conf->mode = net_fc->mode;
		fc_conf->autoneg = net_fc->autoneg;
		return 0;
	}
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}
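/*
 * Usage sketch (illustrative; "port_id" and the threshold values are
 * assumptions): applications drive the two callbacks above through the
 * generic ethdev flow-control API, for example:
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_FC_TX_PAUSE,
 *		.high_water = 1024,
 *		.low_water = 512,
 *		.pause_time = 100,
 *	};
 *	ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *
 * Note that dpaa_flow_ctrl_set() programs the FMAN thresholds only for
 * RTE_FC_TX_PAUSE/RTE_FC_FULL and requires high_water >= low_water.
 */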
static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr,
		      uint32_t index,
		      __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);

	if (ret)
		RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
			" err = %d", ret);
	return 0;
}

static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
			 uint32_t index)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_clear_mac_addr(dpaa_intf->fif, index);
}

static void
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
}

static struct eth_dev_ops dpaa_devops = {
	.dev_configure = dpaa_eth_dev_configure,
	.dev_start = dpaa_eth_dev_start,
	.dev_stop = dpaa_eth_dev_stop,
	.dev_close = dpaa_eth_dev_close,
	.dev_infos_get = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	.rx_queue_setup = dpaa_eth_rx_queue_setup,
	.tx_queue_setup = dpaa_eth_tx_queue_setup,
	.rx_queue_release = dpaa_eth_rx_queue_release,
	.tx_queue_release = dpaa_eth_tx_queue_release,
	.rx_queue_count = dpaa_dev_rx_queue_count,

	.flow_ctrl_get = dpaa_flow_ctrl_get,
	.flow_ctrl_set = dpaa_flow_ctrl_set,

	.link_update = dpaa_eth_link_update,
	.stats_get = dpaa_eth_stats_get,
	.xstats_get = dpaa_dev_xstats_get,
	.xstats_get_by_id = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
	.xstats_get_names = dpaa_xstats_get_names,
	.xstats_reset = dpaa_eth_stats_reset,
	.stats_reset = dpaa_eth_stats_reset,
	.promiscuous_enable = dpaa_eth_promiscuous_enable,
	.promiscuous_disable = dpaa_eth_promiscuous_disable,
	.allmulticast_enable = dpaa_eth_multicast_enable,
	.allmulticast_disable = dpaa_eth_multicast_disable,
	.mtu_set = dpaa_mtu_set,
	.dev_set_link_down = dpaa_link_down,
	.dev_set_link_up = dpaa_link_up,
	.mac_addr_add = dpaa_dev_add_mac_addr,
	.mac_addr_remove = dpaa_dev_remove_mac_addr,
	.mac_addr_set = dpaa_dev_set_mac_addr,

	.fw_version_get = dpaa_fw_version_get,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
{
	if (strcmp(dev->device->driver->name,
		   drv->driver.name))
		return false;

	return true;
}

static bool
is_dpaa_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_dpaa_pmd);
}

int __rte_experimental
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct dpaa_if *dpaa_intf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_dpaa_supported(dev))
		return -ENOTSUP;

	dpaa_intf = dev->data->dev_private;

	if (on)
		fman_if_loopback_enable(dpaa_intf->fif);
	else
		fman_if_loopback_disable(dpaa_intf->fif);

	return 0;
}
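/*
 * Usage sketch for the PMD-specific API above (illustrative; "port_id" is
 * an assumption): applications that include rte_pmd_dpaa.h can toggle MAC
 * loopback on a DPAA port directly, e.g.
 *
 *	if (rte_pmd_dpaa_set_tx_loopback(port_id, 1) != 0)
 *		printf("loopback not supported on port %u\n", port_id);
 *
 * The call returns -ENOTSUP when the port is not bound to this driver.
 */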
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
	struct rte_eth_fc_conf *fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	fc_conf = dpaa_intf->fc_conf;
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}

/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
			      uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	u32 flags = 0;
	struct qm_mcc_initcgr cgr_opts = {
		.we_mask = QM_CGR_WE_CS_THRES |
			   QM_CGR_WE_CSTD_EN |
			   QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_FRAME
		}
	};

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}

	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}
	fq->is_static = false;

	dpaa_poll_queue_default_config(&opts);

	if (cgr_rx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
		cgr_rx->cb = NULL;
		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop init fail on rx fqid %d (ret=%d)",
				fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_rx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}
without_cgr:
	ret = qman_init_fq(fq, flags, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
	return ret;
}

/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
			      struct fman_if *fman_intf)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
	if (ret) {
		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.dest.channel = fman_intf->tx_channel_id;
	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_b = 0;
	/* no tx-confirmation */
	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret)
		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
	return ret;
}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}
	/* "map" this Rx FQ to one of the interface's Tx FQIDs */
	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
			     fqid, ret);
	return ret;
}
#endif

/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_cores, num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;
	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = &dpaa_netcfg->port_cfg[dev_id];
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* save fman_if & cfg in the interface structure */
	dpaa_intf->fif = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;

	/* Initialize Rx FQs */
	if (getenv("DPAA_NUM_RX_QUEUES"))
		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
	else
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;

	/* If push mode queues are to be enabled. Currently we allow only
	 * one queue per thread.
	 */
	if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
		dpaa_push_mode_max_queue =
				atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
		if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
			dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
	}
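	/*
	 * Deployment note (illustrative): both knobs above are plain
	 * environment variables read at probe time, so a hypothetical run
	 * could look like
	 *
	 *	DPAA_NUM_RX_QUEUES=4 DPAA_PUSH_QUEUES_NUMBER=2 ./my_dpdk_app ...
	 *
	 * i.e. four PCD Rx queues per interface, with the first two Rx
	 * queues that get set up (counted across DPAA ports) using push
	 * mode. DPAA_PUSH_QUEUES_NUMBER is silently capped at
	 * DPAA_MAX_PUSH_MODE_QUEUE (4). "./my_dpdk_app" is only a
	 * placeholder application name.
	 */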
	/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
		DPAA_PMD_ERR("Invalid number of RX queues\n");
		return -EINVAL;
	}

	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
		return -ENOMEM;
	}

	/* If congestion control is enabled globally */
	if (td_threshold) {
		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
		if (!dpaa_intf->cgr_rx) {
			DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
			ret = -ENOMEM;
			goto free_rx;
		}

		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
		if (ret != num_rx_fqs) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			ret = -EINVAL;
			goto free_rx;
		}
	} else {
		dpaa_intf->cgr_rx = NULL;
	}

	for (loop = 0; loop < num_rx_fqs; loop++) {
		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
			DPAA_PCD_FQID_MULTIPLIER + loop;

		if (dpaa_intf->cgr_rx)
			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
			fqid);
		if (ret)
			goto free_rx;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs. Have as many Tx FQs as number of cores */
	num_cores = rte_lcore_count();
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		num_cores, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
		ret = -ENOMEM;
		goto free_rx;
	}

	for (loop = 0; loop < num_cores; loop++) {
		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
					 fman_intf);
		if (ret)
			goto free_tx;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = num_cores;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");

	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf);

	/* reset bpool list, initialize bpool dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		ret = -ENOMEM;
		goto free_tx;
	}

	/* copy the primary mac address */
	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);

	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
		dpaa_device->name,
		fman_intf->mac_addr.addr_bytes[0],
		fman_intf->mac_addr.addr_bytes[1],
		fman_intf->mac_addr.addr_bytes[2],
		fman_intf->mac_addr.addr_bytes[3],
		fman_intf->mac_addr.addr_bytes[4],
		fman_intf->mac_addr.addr_bytes[5]);

	/* Disable RX mode */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);

	return 0;

free_tx:
	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;
	dpaa_intf->nb_tx_queues = 0;

free_rx:
	rte_free(dpaa_intf->cgr_rx);
	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;
	dpaa_intf->nb_rx_queues = 0;
	return ret;
}

static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	int loop;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa_eth_dev_close(dev);

	/* release configuration memory */
	if (dpaa_intf->fc_conf)
		rte_free(dpaa_intf->fc_conf);

	/* Release RX congestion Groups */
	if (dpaa_intf->cgr_rx) {
		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);

		/* the allocated CGRID range starts at the first entry */
		qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
					 dpaa_intf->nb_rx_queues);
	}

	rte_free(dpaa_intf->cgr_rx);
	dpaa_intf->cgr_rx = NULL;

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	/* free memory for storing MAC addresses */
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	return 0;
}

static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		return 0;
	}
	if (!is_global_init) {
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_PMD_ERR("QMAN initialization failed: %d",
				     ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_PMD_ERR("BMAN initialization failed: %d",
				     ret);
			return ret;
		}

		is_global_init = 1;
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)1);
		if (ret) {
			DPAA_PMD_ERR("Unable to initialize portal");
			return ret;
		}
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (eth_dev == NULL)
		return -ENOMEM;

	eth_dev->data->dev_private = rte_zmalloc(
					"ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	eth_dev->device->driver = &dpaa_drv->driver;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);