1 /* * SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_ethdev_driver.h> 13 #include <rte_malloc.h> 14 #include <rte_memcpy.h> 15 #include <rte_string_fns.h> 16 #include <rte_cycles.h> 17 #include <rte_kvargs.h> 18 #include <rte_dev.h> 19 #include <rte_fslmc.h> 20 21 #include "dpaa2_pmd_logs.h" 22 #include <fslmc_vfio.h> 23 #include <dpaa2_hw_pvt.h> 24 #include <dpaa2_hw_mempool.h> 25 #include <dpaa2_hw_dpio.h> 26 #include <mc/fsl_dpmng.h> 27 #include "dpaa2_ethdev.h" 28 #include <fsl_qbman_debug.h> 29 30 /* Supported Rx offloads */ 31 static uint64_t dev_rx_offloads_sup = 32 DEV_RX_OFFLOAD_VLAN_STRIP | 33 DEV_RX_OFFLOAD_IPV4_CKSUM | 34 DEV_RX_OFFLOAD_UDP_CKSUM | 35 DEV_RX_OFFLOAD_TCP_CKSUM | 36 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 37 DEV_RX_OFFLOAD_VLAN_FILTER | 38 DEV_RX_OFFLOAD_JUMBO_FRAME; 39 40 /* Rx offloads which cannot be disabled */ 41 static uint64_t dev_rx_offloads_nodis = 42 DEV_RX_OFFLOAD_SCATTER; 43 44 /* Supported Tx offloads */ 45 static uint64_t dev_tx_offloads_sup = 46 DEV_TX_OFFLOAD_VLAN_INSERT | 47 DEV_TX_OFFLOAD_IPV4_CKSUM | 48 DEV_TX_OFFLOAD_UDP_CKSUM | 49 DEV_TX_OFFLOAD_TCP_CKSUM | 50 DEV_TX_OFFLOAD_SCTP_CKSUM | 51 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 52 53 /* Tx offloads which cannot be disabled */ 54 static uint64_t dev_tx_offloads_nodis = 55 DEV_TX_OFFLOAD_MULTI_SEGS | 56 DEV_TX_OFFLOAD_MT_LOCKFREE | 57 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 58 59 struct rte_dpaa2_xstats_name_off { 60 char name[RTE_ETH_XSTATS_NAME_SIZE]; 61 uint8_t page_id; /* dpni statistics page id */ 62 uint8_t stats_id; /* stats id in the given page */ 63 }; 64 65 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { 66 {"ingress_multicast_frames", 0, 2}, 67 {"ingress_multicast_bytes", 0, 3}, 68 {"ingress_broadcast_frames", 0, 4}, 69 {"ingress_broadcast_bytes", 0, 5}, 70 {"egress_multicast_frames", 1, 2}, 71 {"egress_multicast_bytes", 1, 3}, 72 {"egress_broadcast_frames", 1, 4}, 73 {"egress_broadcast_bytes", 1, 5}, 74 {"ingress_filtered_frames", 2, 0}, 75 {"ingress_discarded_frames", 2, 1}, 76 {"ingress_nobuffer_discards", 2, 2}, 77 {"egress_discarded_frames", 2, 3}, 78 {"egress_confirmed_frames", 2, 4}, 79 }; 80 81 static struct rte_dpaa2_driver rte_dpaa2_pmd; 82 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); 83 static int dpaa2_dev_link_update(struct rte_eth_dev *dev, 84 int wait_to_complete); 85 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); 86 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); 87 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 88 89 int dpaa2_logtype_pmd; 90 91 static int 92 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 93 { 94 int ret; 95 struct dpaa2_dev_priv *priv = dev->data->dev_private; 96 struct fsl_mc_io *dpni = priv->hw; 97 98 PMD_INIT_FUNC_TRACE(); 99 100 if (dpni == NULL) { 101 DPAA2_PMD_ERR("dpni is NULL"); 102 return -1; 103 } 104 105 if (on) 106 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, 107 priv->token, vlan_id); 108 else 109 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, 110 priv->token, vlan_id); 111 112 if (ret < 0) 113 DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", 114 ret, vlan_id, priv->hw_id); 115 116 return ret; 117 } 118 119 static int 120 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) 121 { 122 struct dpaa2_dev_priv *priv = dev->data->dev_private; 123 struct 
fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			if (dpaa2_q->cscn)
				rte_free(dpaa2_q->cscn);
		}
		/* free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution."
				      "Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l4 csum:Error = %d", ret);
		return ret;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l4 csum:Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring.
*/ 480 481 /*Get the flow id from given VQ id*/ 482 flow_id = rx_queue_id % priv->nb_rx_queues; 483 memset(&cfg, 0, sizeof(struct dpni_queue)); 484 485 options = options | DPNI_QUEUE_OPT_USER_CTX; 486 cfg.user_context = (size_t)(dpaa2_q); 487 488 /*if ls2088 or rev2 device, enable the stashing */ 489 490 if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { 491 options |= DPNI_QUEUE_OPT_FLC; 492 cfg.flc.stash_control = true; 493 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; 494 /* 00 00 00 - last 6 bit represent annotation, context stashing, 495 * data stashing setting 01 01 00 (0x14) 496 * (in following order ->DS AS CS) 497 * to enable 1 line data, 1 line annotation. 498 * For LX2, this setting should be 01 00 00 (0x10) 499 */ 500 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) 501 cfg.flc.value |= 0x10; 502 else 503 cfg.flc.value |= 0x14; 504 } 505 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, 506 dpaa2_q->tc_index, flow_id, options, &cfg); 507 if (ret) { 508 DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); 509 return -1; 510 } 511 512 if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { 513 struct dpni_taildrop taildrop; 514 515 taildrop.enable = 1; 516 /*enabling per rx queue congestion control */ 517 taildrop.threshold = CONG_THRESHOLD_RX_Q; 518 taildrop.units = DPNI_CONGESTION_UNIT_BYTES; 519 taildrop.oal = CONG_RX_OAL; 520 DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", 521 rx_queue_id); 522 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 523 DPNI_CP_QUEUE, DPNI_QUEUE_RX, 524 dpaa2_q->tc_index, flow_id, &taildrop); 525 if (ret) { 526 DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", 527 ret); 528 return -1; 529 } 530 } 531 532 dev->data->rx_queues[rx_queue_id] = dpaa2_q; 533 return 0; 534 } 535 536 static int 537 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 538 uint16_t tx_queue_id, 539 uint16_t nb_tx_desc __rte_unused, 540 unsigned int socket_id __rte_unused, 541 const struct rte_eth_txconf *tx_conf __rte_unused) 542 { 543 struct dpaa2_dev_priv *priv = dev->data->dev_private; 544 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 545 priv->tx_vq[tx_queue_id]; 546 struct fsl_mc_io *dpni = priv->hw; 547 struct dpni_queue tx_conf_cfg; 548 struct dpni_queue tx_flow_cfg; 549 uint8_t options = 0, flow_id; 550 uint32_t tc_id; 551 int ret; 552 553 PMD_INIT_FUNC_TRACE(); 554 555 /* Return if queue already configured */ 556 if (dpaa2_q->flow_id != 0xffff) { 557 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 558 return 0; 559 } 560 561 memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 562 memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 563 564 tc_id = tx_queue_id; 565 flow_id = 0; 566 567 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, 568 tc_id, flow_id, options, &tx_flow_cfg); 569 if (ret) { 570 DPAA2_PMD_ERR("Error in setting the tx flow: " 571 "tc_id=%d, flow=%d err=%d", 572 tc_id, flow_id, ret); 573 return -1; 574 } 575 576 dpaa2_q->flow_id = flow_id; 577 578 if (tx_queue_id == 0) { 579 /*Set tx-conf and error configuration*/ 580 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 581 priv->token, 582 DPNI_CONF_DISABLE); 583 if (ret) { 584 DPAA2_PMD_ERR("Error in set tx conf mode settings: " 585 "err=%d", ret); 586 return -1; 587 } 588 } 589 dpaa2_q->tc_index = tc_id; 590 591 if (!(priv->flags & DPAA2_TX_CGR_OFF)) { 592 struct dpni_congestion_notification_cfg cong_notif_cfg; 593 594 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; 595 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; 596 /* Notify that the 
queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo - add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
695 * 696 * @return 697 * void 698 */ 699 static void 700 dpaa2_interrupt_handler(void *param) 701 { 702 struct rte_eth_dev *dev = param; 703 struct dpaa2_dev_priv *priv = dev->data->dev_private; 704 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 705 int ret; 706 int irq_index = DPNI_IRQ_INDEX; 707 unsigned int status = 0, clear = 0; 708 709 PMD_INIT_FUNC_TRACE(); 710 711 if (dpni == NULL) { 712 DPAA2_PMD_ERR("dpni is NULL"); 713 return; 714 } 715 716 ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 717 irq_index, &status); 718 if (unlikely(ret)) { 719 DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); 720 clear = 0xffffffff; 721 goto out; 722 } 723 724 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 725 clear = DPNI_IRQ_EVENT_LINK_CHANGED; 726 dpaa2_dev_link_update(dev, 0); 727 /* calling all the apps registered for link status event */ 728 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 729 NULL); 730 } 731 out: 732 ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 733 irq_index, clear); 734 if (unlikely(ret)) 735 DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); 736 } 737 738 static int 739 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 740 { 741 int err = 0; 742 struct dpaa2_dev_priv *priv = dev->data->dev_private; 743 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 744 int irq_index = DPNI_IRQ_INDEX; 745 unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 746 747 PMD_INIT_FUNC_TRACE(); 748 749 err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 750 irq_index, mask); 751 if (err < 0) { 752 DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, 753 strerror(-err)); 754 return err; 755 } 756 757 err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 758 irq_index, enable); 759 if (err < 0) 760 DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, 761 strerror(-err)); 762 763 return err; 764 } 765 766 static int 767 dpaa2_dev_start(struct rte_eth_dev *dev) 768 { 769 struct rte_device *rdev = dev->device; 770 struct rte_dpaa2_device *dpaa2_dev; 771 struct rte_eth_dev_data *data = dev->data; 772 struct dpaa2_dev_priv *priv = data->dev_private; 773 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 774 struct dpni_queue cfg; 775 struct dpni_error_cfg err_cfg; 776 uint16_t qdid; 777 struct dpni_queue_id qid; 778 struct dpaa2_queue *dpaa2_q; 779 int ret, i; 780 struct rte_intr_handle *intr_handle; 781 782 dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 783 intr_handle = &dpaa2_dev->intr_handle; 784 785 PMD_INIT_FUNC_TRACE(); 786 787 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 788 if (ret) { 789 DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", 790 priv->hw_id, ret); 791 return ret; 792 } 793 794 /* Power up the phy. 
Needed to make the link go UP */ 795 dpaa2_dev_set_link_up(dev); 796 797 ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, 798 DPNI_QUEUE_TX, &qdid); 799 if (ret) { 800 DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret); 801 return ret; 802 } 803 priv->qdid = qdid; 804 805 for (i = 0; i < data->nb_rx_queues; i++) { 806 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; 807 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 808 DPNI_QUEUE_RX, dpaa2_q->tc_index, 809 dpaa2_q->flow_id, &cfg, &qid); 810 if (ret) { 811 DPAA2_PMD_ERR("Error in getting flow information: " 812 "err=%d", ret); 813 return ret; 814 } 815 dpaa2_q->fqid = qid.fqid; 816 } 817 818 /*checksum errors, send them to normal path and set it in annotation */ 819 err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; 820 821 err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; 822 err_cfg.set_frame_annotation = true; 823 824 ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, 825 priv->token, &err_cfg); 826 if (ret) { 827 DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", 828 ret); 829 return ret; 830 } 831 832 /* if the interrupts were configured on this devices*/ 833 if (intr_handle && (intr_handle->fd) && 834 (dev->data->dev_conf.intr_conf.lsc != 0)) { 835 /* Registering LSC interrupt handler */ 836 rte_intr_callback_register(intr_handle, 837 dpaa2_interrupt_handler, 838 (void *)dev); 839 840 /* enable vfio intr/eventfd mapping 841 * Interrupt index 0 is required, so we can not use 842 * rte_intr_enable. 843 */ 844 rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); 845 846 /* enable dpni_irqs */ 847 dpaa2_eth_setup_irqs(dev, 1); 848 } 849 850 return 0; 851 } 852 853 /** 854 * This routine disables all traffic on the adapter by issuing a 855 * global reset on the MAC. 856 */ 857 static void 858 dpaa2_dev_stop(struct rte_eth_dev *dev) 859 { 860 struct dpaa2_dev_priv *priv = dev->data->dev_private; 861 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 862 int ret; 863 struct rte_eth_link link; 864 struct rte_intr_handle *intr_handle = dev->intr_handle; 865 866 PMD_INIT_FUNC_TRACE(); 867 868 /* reset interrupt callback */ 869 if (intr_handle && (intr_handle->fd) && 870 (dev->data->dev_conf.intr_conf.lsc != 0)) { 871 /*disable dpni irqs */ 872 dpaa2_eth_setup_irqs(dev, 0); 873 874 /* disable vfio intr before callback unregister */ 875 rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); 876 877 /* Unregistering LSC interrupt handler */ 878 rte_intr_callback_unregister(intr_handle, 879 dpaa2_interrupt_handler, 880 (void *)dev); 881 } 882 883 dpaa2_dev_set_link_down(dev); 884 885 ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); 886 if (ret) { 887 DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", 888 ret, priv->hw_id); 889 return; 890 } 891 892 /* clear the recorded link status */ 893 memset(&link, 0, sizeof(link)); 894 rte_eth_linkstatus_set(dev, &link); 895 } 896 897 static void 898 dpaa2_dev_close(struct rte_eth_dev *dev) 899 { 900 struct dpaa2_dev_priv *priv = dev->data->dev_private; 901 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 902 int ret; 903 struct rte_eth_link link; 904 905 PMD_INIT_FUNC_TRACE(); 906 907 /* Clean the device first */ 908 ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); 909 if (ret) { 910 DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); 911 return; 912 } 913 914 memset(&link, 0, sizeof(link)); 915 rte_eth_linkstatus_set(dev, &link); 916 } 917 918 static void 919 dpaa2_dev_promiscuous_enable( 920 struct rte_eth_dev *dev) 921 { 922 int ret; 923 struct 
dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	/* enable jumbo frame offload when the frame exceeds the standard
	 * Ethernet length, disable it otherwise
	 */
	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d",
mtu); 1052 return 0; 1053 } 1054 1055 static int 1056 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 1057 struct ether_addr *addr, 1058 __rte_unused uint32_t index, 1059 __rte_unused uint32_t pool) 1060 { 1061 int ret; 1062 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1063 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1064 1065 PMD_INIT_FUNC_TRACE(); 1066 1067 if (dpni == NULL) { 1068 DPAA2_PMD_ERR("dpni is NULL"); 1069 return -1; 1070 } 1071 1072 ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, 1073 priv->token, addr->addr_bytes); 1074 if (ret) 1075 DPAA2_PMD_ERR( 1076 "error: Adding the MAC ADDR failed: err = %d", ret); 1077 return 0; 1078 } 1079 1080 static void 1081 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 1082 uint32_t index) 1083 { 1084 int ret; 1085 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1086 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1087 struct rte_eth_dev_data *data = dev->data; 1088 struct ether_addr *macaddr; 1089 1090 PMD_INIT_FUNC_TRACE(); 1091 1092 macaddr = &data->mac_addrs[index]; 1093 1094 if (dpni == NULL) { 1095 DPAA2_PMD_ERR("dpni is NULL"); 1096 return; 1097 } 1098 1099 ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 1100 priv->token, macaddr->addr_bytes); 1101 if (ret) 1102 DPAA2_PMD_ERR( 1103 "error: Removing the MAC ADDR failed: err = %d", ret); 1104 } 1105 1106 static int 1107 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 1108 struct ether_addr *addr) 1109 { 1110 int ret; 1111 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1112 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1113 1114 PMD_INIT_FUNC_TRACE(); 1115 1116 if (dpni == NULL) { 1117 DPAA2_PMD_ERR("dpni is NULL"); 1118 return -EINVAL; 1119 } 1120 1121 ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 1122 priv->token, addr->addr_bytes); 1123 1124 if (ret) 1125 DPAA2_PMD_ERR( 1126 "error: Setting the MAC ADDR failed %d", ret); 1127 1128 return ret; 1129 } 1130 1131 static 1132 int dpaa2_dev_stats_get(struct rte_eth_dev *dev, 1133 struct rte_eth_stats *stats) 1134 { 1135 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1136 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1137 int32_t retcode; 1138 uint8_t page0 = 0, page1 = 1, page2 = 2; 1139 union dpni_statistics value; 1140 int i; 1141 struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; 1142 1143 memset(&value, 0, sizeof(union dpni_statistics)); 1144 1145 PMD_INIT_FUNC_TRACE(); 1146 1147 if (!dpni) { 1148 DPAA2_PMD_ERR("dpni is NULL"); 1149 return -EINVAL; 1150 } 1151 1152 if (!stats) { 1153 DPAA2_PMD_ERR("stats is NULL"); 1154 return -EINVAL; 1155 } 1156 1157 /*Get Counters from page_0*/ 1158 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1159 page0, 0, &value); 1160 if (retcode) 1161 goto err; 1162 1163 stats->ipackets = value.page_0.ingress_all_frames; 1164 stats->ibytes = value.page_0.ingress_all_bytes; 1165 1166 /*Get Counters from page_1*/ 1167 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1168 page1, 0, &value); 1169 if (retcode) 1170 goto err; 1171 1172 stats->opackets = value.page_1.egress_all_frames; 1173 stats->obytes = value.page_1.egress_all_bytes; 1174 1175 /*Get Counters from page_2*/ 1176 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1177 page2, 0, &value); 1178 if (retcode) 1179 goto err; 1180 1181 /* Ingress drop frame count due to configured rules */ 1182 stats->ierrors = value.page_2.ingress_filtered_frames; 1183 /* Ingress drop frame count due to error */ 1184 stats->ierrors += value.page_2.ingress_discarded_frames; 1185 
1186 stats->oerrors = value.page_2.egress_discarded_frames; 1187 stats->imissed = value.page_2.ingress_nobuffer_discards; 1188 1189 /* Fill in per queue stats */ 1190 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && 1191 (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { 1192 dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; 1193 dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; 1194 if (dpaa2_rxq) 1195 stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; 1196 if (dpaa2_txq) 1197 stats->q_opackets[i] = dpaa2_txq->tx_pkts; 1198 1199 /* Byte counting is not implemented */ 1200 stats->q_ibytes[i] = 0; 1201 stats->q_obytes[i] = 0; 1202 } 1203 1204 return 0; 1205 1206 err: 1207 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1208 return retcode; 1209 }; 1210 1211 static int 1212 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1213 unsigned int n) 1214 { 1215 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1216 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1217 int32_t retcode; 1218 union dpni_statistics value[3] = {}; 1219 unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 1220 1221 if (n < num) 1222 return num; 1223 1224 if (xstats == NULL) 1225 return 0; 1226 1227 /* Get Counters from page_0*/ 1228 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1229 0, 0, &value[0]); 1230 if (retcode) 1231 goto err; 1232 1233 /* Get Counters from page_1*/ 1234 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1235 1, 0, &value[1]); 1236 if (retcode) 1237 goto err; 1238 1239 /* Get Counters from page_2*/ 1240 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1241 2, 0, &value[2]); 1242 if (retcode) 1243 goto err; 1244 1245 for (i = 0; i < num; i++) { 1246 xstats[i].id = i; 1247 xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 
1248 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1249 } 1250 return i; 1251 err: 1252 DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); 1253 return retcode; 1254 } 1255 1256 static int 1257 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1258 struct rte_eth_xstat_name *xstats_names, 1259 unsigned int limit) 1260 { 1261 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1262 1263 if (limit < stat_cnt) 1264 return stat_cnt; 1265 1266 if (xstats_names != NULL) 1267 for (i = 0; i < stat_cnt; i++) 1268 snprintf(xstats_names[i].name, 1269 sizeof(xstats_names[i].name), 1270 "%s", 1271 dpaa2_xstats_strings[i].name); 1272 1273 return stat_cnt; 1274 } 1275 1276 static int 1277 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1278 uint64_t *values, unsigned int n) 1279 { 1280 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1281 uint64_t values_copy[stat_cnt]; 1282 1283 if (!ids) { 1284 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1285 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1286 int32_t retcode; 1287 union dpni_statistics value[3] = {}; 1288 1289 if (n < stat_cnt) 1290 return stat_cnt; 1291 1292 if (!values) 1293 return 0; 1294 1295 /* Get Counters from page_0*/ 1296 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1297 0, 0, &value[0]); 1298 if (retcode) 1299 return 0; 1300 1301 /* Get Counters from page_1*/ 1302 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1303 1, 0, &value[1]); 1304 if (retcode) 1305 return 0; 1306 1307 /* Get Counters from page_2*/ 1308 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1309 2, 0, &value[2]); 1310 if (retcode) 1311 return 0; 1312 1313 for (i = 0; i < stat_cnt; i++) { 1314 values[i] = value[dpaa2_xstats_strings[i].page_id]. 
1315 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1316 } 1317 return stat_cnt; 1318 } 1319 1320 dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 1321 1322 for (i = 0; i < n; i++) { 1323 if (ids[i] >= stat_cnt) { 1324 DPAA2_PMD_ERR("xstats id value isn't valid"); 1325 return -1; 1326 } 1327 values[i] = values_copy[ids[i]]; 1328 } 1329 return n; 1330 } 1331 1332 static int 1333 dpaa2_xstats_get_names_by_id( 1334 struct rte_eth_dev *dev, 1335 struct rte_eth_xstat_name *xstats_names, 1336 const uint64_t *ids, 1337 unsigned int limit) 1338 { 1339 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1340 struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 1341 1342 if (!ids) 1343 return dpaa2_xstats_get_names(dev, xstats_names, limit); 1344 1345 dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 1346 1347 for (i = 0; i < limit; i++) { 1348 if (ids[i] >= stat_cnt) { 1349 DPAA2_PMD_ERR("xstats id value isn't valid"); 1350 return -1; 1351 } 1352 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 1353 } 1354 return limit; 1355 } 1356 1357 static void 1358 dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1359 { 1360 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1361 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1362 int32_t retcode; 1363 int i; 1364 struct dpaa2_queue *dpaa2_q; 1365 1366 PMD_INIT_FUNC_TRACE(); 1367 1368 if (dpni == NULL) { 1369 DPAA2_PMD_ERR("dpni is NULL"); 1370 return; 1371 } 1372 1373 retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1374 if (retcode) 1375 goto error; 1376 1377 /* Reset the per queue stats in dpaa2_queue structure */ 1378 for (i = 0; i < priv->nb_rx_queues; i++) { 1379 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1380 if (dpaa2_q) 1381 dpaa2_q->rx_pkts = 0; 1382 } 1383 1384 for (i = 0; i < priv->nb_tx_queues; i++) { 1385 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 1386 if (dpaa2_q) 1387 dpaa2_q->tx_pkts = 0; 1388 } 1389 1390 return; 1391 1392 error: 1393 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1394 return; 1395 }; 1396 1397 /* return 0 means link status changed, -1 means not changed */ 1398 static int 1399 dpaa2_dev_link_update(struct rte_eth_dev *dev, 1400 int wait_to_complete __rte_unused) 1401 { 1402 int ret; 1403 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1404 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1405 struct rte_eth_link link; 1406 struct dpni_link_state state = {0}; 1407 1408 if (dpni == NULL) { 1409 DPAA2_PMD_ERR("dpni is NULL"); 1410 return 0; 1411 } 1412 1413 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1414 if (ret < 0) { 1415 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 1416 return -1; 1417 } 1418 1419 memset(&link, 0, sizeof(struct rte_eth_link)); 1420 link.link_status = state.up; 1421 link.link_speed = state.rate; 1422 1423 if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1424 link.link_duplex = ETH_LINK_HALF_DUPLEX; 1425 else 1426 link.link_duplex = ETH_LINK_FULL_DUPLEX; 1427 1428 ret = rte_eth_linkstatus_set(dev, &link); 1429 if (ret == -1) 1430 DPAA2_PMD_DEBUG("No change in status"); 1431 else 1432 DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, 1433 link.link_status ? "Up" : "Down"); 1434 1435 return ret; 1436 } 1437 1438 /** 1439 * Toggle the DPNI to enable, if not already enabled. 1440 * This is not strictly PHY up/down - it is more of logical toggling. 
1441 */ 1442 static int 1443 dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1444 { 1445 int ret = -EINVAL; 1446 struct dpaa2_dev_priv *priv; 1447 struct fsl_mc_io *dpni; 1448 int en = 0; 1449 struct dpni_link_state state = {0}; 1450 1451 priv = dev->data->dev_private; 1452 dpni = (struct fsl_mc_io *)priv->hw; 1453 1454 if (dpni == NULL) { 1455 DPAA2_PMD_ERR("dpni is NULL"); 1456 return ret; 1457 } 1458 1459 /* Check if DPNI is currently enabled */ 1460 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1461 if (ret) { 1462 /* Unable to obtain dpni status; Not continuing */ 1463 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1464 return -EINVAL; 1465 } 1466 1467 /* Enable link if not already enabled */ 1468 if (!en) { 1469 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1470 if (ret) { 1471 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1472 return -EINVAL; 1473 } 1474 } 1475 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1476 if (ret < 0) { 1477 DPAA2_PMD_ERR("Unable to get link state (%d)", ret); 1478 return -1; 1479 } 1480 1481 /* changing tx burst function to start enqueues */ 1482 dev->tx_pkt_burst = dpaa2_dev_tx; 1483 dev->data->dev_link.link_status = state.up; 1484 1485 if (state.up) 1486 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); 1487 else 1488 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); 1489 return ret; 1490 } 1491 1492 /** 1493 * Toggle the DPNI to disable, if not already disabled. 1494 * This is not strictly PHY up/down - it is more of logical toggling. 1495 */ 1496 static int 1497 dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 1498 { 1499 int ret = -EINVAL; 1500 struct dpaa2_dev_priv *priv; 1501 struct fsl_mc_io *dpni; 1502 int dpni_enabled = 0; 1503 int retries = 10; 1504 1505 PMD_INIT_FUNC_TRACE(); 1506 1507 priv = dev->data->dev_private; 1508 dpni = (struct fsl_mc_io *)priv->hw; 1509 1510 if (dpni == NULL) { 1511 DPAA2_PMD_ERR("Device has not yet been configured"); 1512 return ret; 1513 } 1514 1515 /*changing tx burst function to avoid any more enqueues */ 1516 dev->tx_pkt_burst = dummy_dev_tx; 1517 1518 /* Loop while dpni_disable() attempts to drain the egress FQs 1519 * and confirm them back to us. 1520 */ 1521 do { 1522 ret = dpni_disable(dpni, 0, priv->token); 1523 if (ret) { 1524 DPAA2_PMD_ERR("dpni disable failed (%d)", ret); 1525 return ret; 1526 } 1527 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 1528 if (ret) { 1529 DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); 1530 return ret; 1531 } 1532 if (dpni_enabled) 1533 /* Allow the MC some slack */ 1534 rte_delay_us(100 * 1000); 1535 } while (dpni_enabled && --retries); 1536 1537 if (!retries) { 1538 DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); 1539 /* todo- we may have to manually cleanup queues. 
1540 */ 1541 } else { 1542 DPAA2_PMD_INFO("Port %d Link DOWN successful", 1543 dev->data->port_id); 1544 } 1545 1546 dev->data->dev_link.link_status = 0; 1547 1548 return ret; 1549 } 1550 1551 static int 1552 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1553 { 1554 int ret = -EINVAL; 1555 struct dpaa2_dev_priv *priv; 1556 struct fsl_mc_io *dpni; 1557 struct dpni_link_state state = {0}; 1558 1559 PMD_INIT_FUNC_TRACE(); 1560 1561 priv = dev->data->dev_private; 1562 dpni = (struct fsl_mc_io *)priv->hw; 1563 1564 if (dpni == NULL || fc_conf == NULL) { 1565 DPAA2_PMD_ERR("device not configured"); 1566 return ret; 1567 } 1568 1569 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1570 if (ret) { 1571 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 1572 return ret; 1573 } 1574 1575 memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 1576 if (state.options & DPNI_LINK_OPT_PAUSE) { 1577 /* DPNI_LINK_OPT_PAUSE set 1578 * if ASYM_PAUSE not set, 1579 * RX Side flow control (handle received Pause frame) 1580 * TX side flow control (send Pause frame) 1581 * if ASYM_PAUSE set, 1582 * RX Side flow control (handle received Pause frame) 1583 * No TX side flow control (send Pause frame disabled) 1584 */ 1585 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 1586 fc_conf->mode = RTE_FC_FULL; 1587 else 1588 fc_conf->mode = RTE_FC_RX_PAUSE; 1589 } else { 1590 /* DPNI_LINK_OPT_PAUSE not set 1591 * if ASYM_PAUSE set, 1592 * TX side flow control (send Pause frame) 1593 * No RX side flow control (No action on pause frame rx) 1594 * if ASYM_PAUSE not set, 1595 * Flow control disabled 1596 */ 1597 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 1598 fc_conf->mode = RTE_FC_TX_PAUSE; 1599 else 1600 fc_conf->mode = RTE_FC_NONE; 1601 } 1602 1603 return ret; 1604 } 1605 1606 static int 1607 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1608 { 1609 int ret = -EINVAL; 1610 struct dpaa2_dev_priv *priv; 1611 struct fsl_mc_io *dpni; 1612 struct dpni_link_state state = {0}; 1613 struct dpni_link_cfg cfg = {0}; 1614 1615 PMD_INIT_FUNC_TRACE(); 1616 1617 priv = dev->data->dev_private; 1618 dpni = (struct fsl_mc_io *)priv->hw; 1619 1620 if (dpni == NULL) { 1621 DPAA2_PMD_ERR("dpni is NULL"); 1622 return ret; 1623 } 1624 1625 /* It is necessary to obtain the current state before setting fc_conf 1626 * as MC would return error in case rate, autoneg or duplex values are 1627 * different. 
1628 */ 1629 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1630 if (ret) { 1631 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); 1632 return -1; 1633 } 1634 1635 /* Disable link before setting configuration */ 1636 dpaa2_dev_set_link_down(dev); 1637 1638 /* Based on fc_conf, update cfg */ 1639 cfg.rate = state.rate; 1640 cfg.options = state.options; 1641 1642 /* update cfg with fc_conf */ 1643 switch (fc_conf->mode) { 1644 case RTE_FC_FULL: 1645 /* Full flow control; 1646 * OPT_PAUSE set, ASYM_PAUSE not set 1647 */ 1648 cfg.options |= DPNI_LINK_OPT_PAUSE; 1649 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1650 break; 1651 case RTE_FC_TX_PAUSE: 1652 /* Enable RX flow control 1653 * OPT_PAUSE not set; 1654 * ASYM_PAUSE set; 1655 */ 1656 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1657 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1658 break; 1659 case RTE_FC_RX_PAUSE: 1660 /* Enable TX Flow control 1661 * OPT_PAUSE set 1662 * ASYM_PAUSE set 1663 */ 1664 cfg.options |= DPNI_LINK_OPT_PAUSE; 1665 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1666 break; 1667 case RTE_FC_NONE: 1668 /* Disable Flow control 1669 * OPT_PAUSE not set 1670 * ASYM_PAUSE not set 1671 */ 1672 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1673 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1674 break; 1675 default: 1676 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", 1677 fc_conf->mode); 1678 return -1; 1679 } 1680 1681 ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 1682 if (ret) 1683 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", 1684 ret); 1685 1686 /* Enable link */ 1687 dpaa2_dev_set_link_up(dev); 1688 1689 return ret; 1690 } 1691 1692 static int 1693 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, 1694 struct rte_eth_rss_conf *rss_conf) 1695 { 1696 struct rte_eth_dev_data *data = dev->data; 1697 struct rte_eth_conf *eth_conf = &data->dev_conf; 1698 int ret; 1699 1700 PMD_INIT_FUNC_TRACE(); 1701 1702 if (rss_conf->rss_hf) { 1703 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); 1704 if (ret) { 1705 DPAA2_PMD_ERR("Unable to set flow dist"); 1706 return ret; 1707 } 1708 } else { 1709 ret = dpaa2_remove_flow_dist(dev, 0); 1710 if (ret) { 1711 DPAA2_PMD_ERR("Unable to remove flow dist"); 1712 return ret; 1713 } 1714 } 1715 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 1716 return 0; 1717 } 1718 1719 static int 1720 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1721 struct rte_eth_rss_conf *rss_conf) 1722 { 1723 struct rte_eth_dev_data *data = dev->data; 1724 struct rte_eth_conf *eth_conf = &data->dev_conf; 1725 1726 /* dpaa2 does not support rss_key, so length should be 0*/ 1727 rss_conf->rss_key_len = 0; 1728 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 1729 return 0; 1730 } 1731 1732 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, 1733 int eth_rx_queue_id, 1734 uint16_t dpcon_id, 1735 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 1736 { 1737 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 1738 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; 1739 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 1740 uint8_t flow_id = dpaa2_ethq->flow_id; 1741 struct dpni_queue cfg; 1742 uint8_t options; 1743 int ret; 1744 1745 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 1746 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; 1747 else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) 1748 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; 1749 else 1750 return -EINVAL; 1751 1752 memset(&cfg, 0, 
sizeof(struct dpni_queue)); 1753 options = DPNI_QUEUE_OPT_DEST; 1754 cfg.destination.type = DPNI_DEST_DPCON; 1755 cfg.destination.id = dpcon_id; 1756 cfg.destination.priority = queue_conf->ev.priority; 1757 1758 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { 1759 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; 1760 cfg.destination.hold_active = 1; 1761 } 1762 1763 options |= DPNI_QUEUE_OPT_USER_CTX; 1764 cfg.user_context = (size_t)(dpaa2_ethq); 1765 1766 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1767 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1768 if (ret) { 1769 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 1770 return ret; 1771 } 1772 1773 memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 1774 1775 return 0; 1776 } 1777 1778 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 1779 int eth_rx_queue_id) 1780 { 1781 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 1782 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; 1783 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 1784 uint8_t flow_id = dpaa2_ethq->flow_id; 1785 struct dpni_queue cfg; 1786 uint8_t options; 1787 int ret; 1788 1789 memset(&cfg, 0, sizeof(struct dpni_queue)); 1790 options = DPNI_QUEUE_OPT_DEST; 1791 cfg.destination.type = DPNI_DEST_NONE; 1792 1793 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1794 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1795 if (ret) 1796 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 1797 1798 return ret; 1799 } 1800 1801 static struct eth_dev_ops dpaa2_ethdev_ops = { 1802 .dev_configure = dpaa2_eth_dev_configure, 1803 .dev_start = dpaa2_dev_start, 1804 .dev_stop = dpaa2_dev_stop, 1805 .dev_close = dpaa2_dev_close, 1806 .promiscuous_enable = dpaa2_dev_promiscuous_enable, 1807 .promiscuous_disable = dpaa2_dev_promiscuous_disable, 1808 .allmulticast_enable = dpaa2_dev_allmulticast_enable, 1809 .allmulticast_disable = dpaa2_dev_allmulticast_disable, 1810 .dev_set_link_up = dpaa2_dev_set_link_up, 1811 .dev_set_link_down = dpaa2_dev_set_link_down, 1812 .link_update = dpaa2_dev_link_update, 1813 .stats_get = dpaa2_dev_stats_get, 1814 .xstats_get = dpaa2_dev_xstats_get, 1815 .xstats_get_by_id = dpaa2_xstats_get_by_id, 1816 .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 1817 .xstats_get_names = dpaa2_xstats_get_names, 1818 .stats_reset = dpaa2_dev_stats_reset, 1819 .xstats_reset = dpaa2_dev_stats_reset, 1820 .fw_version_get = dpaa2_fw_version_get, 1821 .dev_infos_get = dpaa2_dev_info_get, 1822 .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 1823 .mtu_set = dpaa2_dev_mtu_set, 1824 .vlan_filter_set = dpaa2_vlan_filter_set, 1825 .vlan_offload_set = dpaa2_vlan_offload_set, 1826 .rx_queue_setup = dpaa2_dev_rx_queue_setup, 1827 .rx_queue_release = dpaa2_dev_rx_queue_release, 1828 .tx_queue_setup = dpaa2_dev_tx_queue_setup, 1829 .tx_queue_release = dpaa2_dev_tx_queue_release, 1830 .rx_queue_count = dpaa2_dev_rx_queue_count, 1831 .flow_ctrl_get = dpaa2_flow_ctrl_get, 1832 .flow_ctrl_set = dpaa2_flow_ctrl_set, 1833 .mac_addr_add = dpaa2_dev_add_mac_addr, 1834 .mac_addr_remove = dpaa2_dev_remove_mac_addr, 1835 .mac_addr_set = dpaa2_dev_set_mac_addr, 1836 .rss_hash_update = dpaa2_dev_rss_hash_update, 1837 .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 1838 }; 1839 1840 /* Populate the mac address from physically available (u-boot/firmware) and/or 1841 * one set by higher layers like MC (restool) etc. 
1842 * Returns the table of MAC entries (multiple entries) 1843 */ 1844 static int 1845 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, 1846 struct ether_addr *mac_entry) 1847 { 1848 int ret; 1849 struct ether_addr phy_mac, prime_mac; 1850 1851 memset(&phy_mac, 0, sizeof(struct ether_addr)); 1852 memset(&prime_mac, 0, sizeof(struct ether_addr)); 1853 1854 /* Get the physical device MAC address */ 1855 ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 1856 phy_mac.addr_bytes); 1857 if (ret) { 1858 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); 1859 goto cleanup; 1860 } 1861 1862 ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 1863 prime_mac.addr_bytes); 1864 if (ret) { 1865 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); 1866 goto cleanup; 1867 } 1868 1869 /* Now that both MAC have been obtained, do: 1870 * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy 1871 * and return phy 1872 * If empty_mac(phy), return prime. 1873 * if both are empty, create random MAC, set as prime and return 1874 */ 1875 if (!is_zero_ether_addr(&phy_mac)) { 1876 /* If the addresses are not same, overwrite prime */ 1877 if (!is_same_ether_addr(&phy_mac, &prime_mac)) { 1878 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 1879 priv->token, 1880 phy_mac.addr_bytes); 1881 if (ret) { 1882 DPAA2_PMD_ERR("Unable to set MAC Address: %d", 1883 ret); 1884 goto cleanup; 1885 } 1886 memcpy(&prime_mac, &phy_mac, sizeof(struct ether_addr)); 1887 } 1888 } else if (is_zero_ether_addr(&prime_mac)) { 1889 /* In case phys and prime, both are zero, create random MAC */ 1890 eth_random_addr(prime_mac.addr_bytes); 1891 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 1892 priv->token, 1893 prime_mac.addr_bytes); 1894 if (ret) { 1895 DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); 1896 goto cleanup; 1897 } 1898 } 1899 1900 /* prime_mac the final MAC address */ 1901 memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr)); 1902 return 0; 1903 1904 cleanup: 1905 return -1; 1906 } 1907 1908 static int 1909 dpaa2_dev_init(struct rte_eth_dev *eth_dev) 1910 { 1911 struct rte_device *dev = eth_dev->device; 1912 struct rte_dpaa2_device *dpaa2_dev; 1913 struct fsl_mc_io *dpni_dev; 1914 struct dpni_attr attr; 1915 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1916 struct dpni_buffer_layout layout; 1917 int ret, hw_id; 1918 1919 PMD_INIT_FUNC_TRACE(); 1920 1921 /* For secondary processes, the primary has done all the work */ 1922 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1923 return 0; 1924 1925 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 1926 1927 hw_id = dpaa2_dev->object_id; 1928 1929 dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 1930 if (!dpni_dev) { 1931 DPAA2_PMD_ERR("Memory allocation failed for dpni device"); 1932 return -1; 1933 } 1934 1935 dpni_dev->regs = rte_mcp_ptr_list[0]; 1936 ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 1937 if (ret) { 1938 DPAA2_PMD_ERR( 1939 "Failure in opening dpni@%d with err code %d", 1940 hw_id, ret); 1941 rte_free(dpni_dev); 1942 return -1; 1943 } 1944 1945 /* Clean the device first */ 1946 ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 1947 if (ret) { 1948 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", 1949 hw_id, ret); 1950 goto init_err; 1951 } 1952 1953 ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 1954 if (ret) { 1955 DPAA2_PMD_ERR( 1956 "Failure in get dpni@%d attribute, err code 
%d",
			hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
			priv->num_rx_tc, priv->nb_rx_queues,
			priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		DPAA2_PMD_ERR("Queue allocation Failed");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses.
	 * Table of mac_filter_entries size is allocated so that RTE ether lib
	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA2_PMD_ERR(
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
	if (ret) {
		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ...
*/ 2024 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 2025 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 2026 layout.pass_frame_status = 1; 2027 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2028 DPNI_QUEUE_TX_CONFIRM, &layout); 2029 if (ret) { 2030 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", 2031 ret); 2032 goto init_err; 2033 } 2034 2035 eth_dev->dev_ops = &dpaa2_ethdev_ops; 2036 2037 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2038 eth_dev->tx_pkt_burst = dpaa2_dev_tx; 2039 2040 RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); 2041 return 0; 2042 init_err: 2043 dpaa2_dev_uninit(eth_dev); 2044 return ret; 2045 } 2046 2047 static int 2048 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) 2049 { 2050 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 2051 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 2052 int ret; 2053 2054 PMD_INIT_FUNC_TRACE(); 2055 2056 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2057 return 0; 2058 2059 if (!dpni) { 2060 DPAA2_PMD_WARN("Already closed or not started"); 2061 return -1; 2062 } 2063 2064 dpaa2_dev_close(eth_dev); 2065 2066 dpaa2_free_rx_tx_queues(eth_dev); 2067 2068 /* free memory for storing MAC addresses */ 2069 if (eth_dev->data->mac_addrs) { 2070 rte_free(eth_dev->data->mac_addrs); 2071 eth_dev->data->mac_addrs = NULL; 2072 } 2073 2074 /* Close the device at underlying layer*/ 2075 ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 2076 if (ret) { 2077 DPAA2_PMD_ERR( 2078 "Failure closing dpni device with err code %d", 2079 ret); 2080 } 2081 2082 /* Free the allocated memory for ethernet private data and dpni*/ 2083 priv->hw = NULL; 2084 rte_free(dpni); 2085 2086 eth_dev->dev_ops = NULL; 2087 eth_dev->rx_pkt_burst = NULL; 2088 eth_dev->tx_pkt_burst = NULL; 2089 2090 DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); 2091 return 0; 2092 } 2093 2094 static int 2095 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 2096 struct rte_dpaa2_device *dpaa2_dev) 2097 { 2098 struct rte_eth_dev *eth_dev; 2099 int diag; 2100 2101 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2102 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); 2103 if (!eth_dev) 2104 return -ENODEV; 2105 eth_dev->data->dev_private = rte_zmalloc( 2106 "ethdev private structure", 2107 sizeof(struct dpaa2_dev_priv), 2108 RTE_CACHE_LINE_SIZE); 2109 if (eth_dev->data->dev_private == NULL) { 2110 DPAA2_PMD_CRIT( 2111 "Unable to allocate memory for private data"); 2112 rte_eth_dev_release_port(eth_dev); 2113 return -ENOMEM; 2114 } 2115 } else { 2116 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); 2117 if (!eth_dev) 2118 return -ENODEV; 2119 } 2120 2121 eth_dev->device = &dpaa2_dev->device; 2122 2123 dpaa2_dev->eth_dev = eth_dev; 2124 eth_dev->data->rx_mbuf_alloc_failed = 0; 2125 2126 if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) 2127 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 2128 2129 /* Invoke PMD device initialization function */ 2130 diag = dpaa2_dev_init(eth_dev); 2131 if (diag == 0) { 2132 rte_eth_dev_probing_finish(eth_dev); 2133 return 0; 2134 } 2135 2136 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2137 rte_free(eth_dev->data->dev_private); 2138 rte_eth_dev_release_port(eth_dev); 2139 return diag; 2140 } 2141 2142 static int 2143 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 2144 { 2145 struct rte_eth_dev *eth_dev; 2146 2147 eth_dev = dpaa2_dev->eth_dev; 2148 dpaa2_dev_uninit(eth_dev); 2149 2150 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2151 
rte_free(eth_dev->data->dev_private); 2152 rte_eth_dev_release_port(eth_dev); 2153 2154 return 0; 2155 } 2156 2157 static struct rte_dpaa2_driver rte_dpaa2_pmd = { 2158 .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 2159 .drv_type = DPAA2_ETH, 2160 .probe = rte_dpaa2_probe, 2161 .remove = rte_dpaa2_remove, 2162 }; 2163 2164 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); 2165 2166 RTE_INIT(dpaa2_pmd_init_log) 2167 { 2168 dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); 2169 if (dpaa2_logtype_pmd >= 0) 2170 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); 2171 } 2172