/* * SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv =
		dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution."
346 "Check queue config"); 347 return ret; 348 } 349 } 350 351 if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) 352 rx_l3_csum_offload = true; 353 354 if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || 355 (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) 356 rx_l4_csum_offload = true; 357 358 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 359 DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); 360 if (ret) { 361 DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); 362 return ret; 363 } 364 365 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 366 DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); 367 if (ret) { 368 DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); 369 return ret; 370 } 371 372 if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) 373 tx_l3_csum_offload = true; 374 375 if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || 376 (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || 377 (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) 378 tx_l4_csum_offload = true; 379 380 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 381 DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); 382 if (ret) { 383 DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); 384 return ret; 385 } 386 387 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 388 DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); 389 if (ret) { 390 DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); 391 return ret; 392 } 393 394 /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in 395 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC] 396 * to 0 for LS2 in the hardware thus disabling data/annotation 397 * stashing. For LX2 this is fixed in hardware and thus hash result and 398 * parse results can be received in FD using this option. 399 */ 400 if (dpaa2_svr_family == SVR_LX2160A) { 401 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 402 DPNI_FLCTYPE_HASH, true); 403 if (ret) { 404 DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); 405 return ret; 406 } 407 } 408 409 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); 410 411 /* update the current status */ 412 dpaa2_dev_link_update(dev, 0); 413 414 return 0; 415 } 416 417 /* Function to setup RX flow information. It contains traffic class ID, 418 * flow ID, destination configuration etc. 419 */ 420 static int 421 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, 422 uint16_t rx_queue_id, 423 uint16_t nb_rx_desc __rte_unused, 424 unsigned int socket_id __rte_unused, 425 const struct rte_eth_rxconf *rx_conf __rte_unused, 426 struct rte_mempool *mb_pool) 427 { 428 struct dpaa2_dev_priv *priv = dev->data->dev_private; 429 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 430 struct dpaa2_queue *dpaa2_q; 431 struct dpni_queue cfg; 432 uint8_t options = 0; 433 uint8_t flow_id; 434 uint32_t bpid; 435 int ret; 436 437 PMD_INIT_FUNC_TRACE(); 438 439 DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", 440 dev, rx_queue_id, mb_pool, rx_conf); 441 442 if (!priv->bp_list || priv->bp_list->mp != mb_pool) { 443 bpid = mempool_to_bpid(mb_pool); 444 ret = dpaa2_attach_bp_list(priv, 445 rte_dpaa2_bpid_info[bpid].bp_list); 446 if (ret) 447 return ret; 448 } 449 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 450 dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. 

	/*Get the flow id from given VQ id*/
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/*if ls2088 or rev2 device, enable the stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14)
		 * (in following order ->DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/*enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/*Set tx-conf and error configuration*/
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/*todo -= add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/*checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/*disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); 891 return; 892 } 893 894 memset(&link, 0, sizeof(link)); 895 rte_eth_linkstatus_set(dev, &link); 896 } 897 898 static void 899 dpaa2_dev_promiscuous_enable( 900 struct rte_eth_dev *dev) 901 { 902 int ret; 903 struct dpaa2_dev_priv *priv = dev->data->dev_private; 904 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 905 906 PMD_INIT_FUNC_TRACE(); 907 908 if (dpni == NULL) { 909 DPAA2_PMD_ERR("dpni is NULL"); 910 return; 911 } 912 913 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 914 if (ret < 0) 915 DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); 916 917 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 918 if (ret < 0) 919 DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); 920 } 921 922 static void 923 dpaa2_dev_promiscuous_disable( 924 struct rte_eth_dev *dev) 925 { 926 int ret; 927 struct dpaa2_dev_priv *priv = dev->data->dev_private; 928 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 929 930 PMD_INIT_FUNC_TRACE(); 931 932 if (dpni == NULL) { 933 DPAA2_PMD_ERR("dpni is NULL"); 934 return; 935 } 936 937 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 938 if (ret < 0) 939 DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); 940 941 if (dev->data->all_multicast == 0) { 942 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, 943 priv->token, false); 944 if (ret < 0) 945 DPAA2_PMD_ERR("Unable to disable M promisc mode %d", 946 ret); 947 } 948 } 949 950 static void 951 dpaa2_dev_allmulticast_enable( 952 struct rte_eth_dev *dev) 953 { 954 int ret; 955 struct dpaa2_dev_priv *priv = dev->data->dev_private; 956 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 957 958 PMD_INIT_FUNC_TRACE(); 959 960 if (dpni == NULL) { 961 DPAA2_PMD_ERR("dpni is NULL"); 962 return; 963 } 964 965 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 966 if (ret < 0) 967 DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); 968 } 969 970 static void 971 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 972 { 973 int ret; 974 struct dpaa2_dev_priv *priv = dev->data->dev_private; 975 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 976 977 PMD_INIT_FUNC_TRACE(); 978 979 if (dpni == NULL) { 980 DPAA2_PMD_ERR("dpni is NULL"); 981 return; 982 } 983 984 /* must remain on for all promiscuous */ 985 if (dev->data->promiscuous == 1) 986 return; 987 988 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 989 if (ret < 0) 990 DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); 991 } 992 993 static int 994 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 995 { 996 int ret; 997 struct dpaa2_dev_priv *priv = dev->data->dev_private; 998 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 999 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN 1000 + VLAN_TAG_SIZE; 1001 1002 PMD_INIT_FUNC_TRACE(); 1003 1004 if (dpni == NULL) { 1005 DPAA2_PMD_ERR("dpni is NULL"); 1006 return -EINVAL; 1007 } 1008 1009 /* check that mtu is within the allowed range */ 1010 if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) 1011 return -EINVAL; 1012 1013 if (frame_size > ETHER_MAX_LEN) 1014 dev->data->dev_conf.rxmode.offloads &= 1015 DEV_RX_OFFLOAD_JUMBO_FRAME; 1016 else 1017 dev->data->dev_conf.rxmode.offloads &= 1018 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1019 1020 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 1021 1022 /* Set the Max Rx frame length as 'mtu' + 1023 * Maximum Ethernet header 
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/*Get Counters from page_0*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/*Get Counters from page_1*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/*Get Counters from page_2*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return retcode;
};

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	/* Get Counters from page_0*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0*/
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1*/
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2*/
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return;
};

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return 0;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		DPAA2_PMD_DEBUG("No change in status");
	else
		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
			       link.link_status ? "Up" : "Down");

	return ret;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_ERR("Unable to get link state (%d)", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
	else
		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("Device has not yet been configured");
		return ret;
	}

	/*changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
		/* todo- we may have to manually cleanup queues.
		 */
	} else {
		DPAA2_PMD_INFO("Port %d Link DOWN successful",
			       dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		DPAA2_PMD_ERR("device not configured");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
			      fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
			      ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			DPAA2_PMD_ERR("Unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0*/
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
		cfg.destination.hold_active = 1;
	}

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure            = dpaa2_eth_dev_configure,
	.dev_start                = dpaa2_dev_start,
	.dev_stop                 = dpaa2_dev_stop,
	.dev_close                = dpaa2_dev_close,
	.promiscuous_enable       = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable      = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable      = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable     = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up          = dpaa2_dev_set_link_up,
	.dev_set_link_down        = dpaa2_dev_set_link_down,
	.link_update              = dpaa2_dev_link_update,
	.stats_get                = dpaa2_dev_stats_get,
	.xstats_get               = dpaa2_dev_xstats_get,
	.xstats_get_by_id         = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id   = dpaa2_xstats_get_names_by_id,
	.xstats_get_names         = dpaa2_xstats_get_names,
	.stats_reset              = dpaa2_dev_stats_reset,
	.xstats_reset             = dpaa2_dev_stats_reset,
	.fw_version_get           = dpaa2_fw_version_get,
	.dev_infos_get            = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set                  = dpaa2_dev_mtu_set,
	.vlan_filter_set          = dpaa2_vlan_filter_set,
	.vlan_offload_set         = dpaa2_vlan_offload_set,
	.rx_queue_setup           = dpaa2_dev_rx_queue_setup,
	.rx_queue_release         = dpaa2_dev_rx_queue_release,
	.tx_queue_setup           = dpaa2_dev_tx_queue_setup,
	.tx_queue_release         = dpaa2_dev_tx_queue_release,
	.rx_queue_count           = dpaa2_dev_rx_queue_count,
	.flow_ctrl_get            = dpaa2_flow_ctrl_get,
	.flow_ctrl_set            = dpaa2_flow_ctrl_set,
	.mac_addr_add             = dpaa2_dev_add_mac_addr,
	.mac_addr_remove          = dpaa2_dev_remove_mac_addr,
	.mac_addr_set             = dpaa2_dev_set_mac_addr,
	.rss_hash_update          = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get        = dpaa2_dev_rss_hash_conf_get,
};

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure in opening dpni@%d with err code %d",
			hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
			      hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure in get dpni@%d attribute, err code %d",
			hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
			priv->num_rx_tc, priv->nb_rx_queues,
			priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		DPAA2_PMD_ERR("Queue allocation Failed");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA2_PMD_ERR(
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d",
			      ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
			      ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		DPAA2_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/*free the all queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer*/
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure closing dpni device with err code %d",
			ret);
	}

	/* Free the allocated memory for ethernet private data and dpni*/
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			DPAA2_PMD_CRIT(
				"Unable to allocate memory for private data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);

RTE_INIT(dpaa2_pmd_init_log);
static void
dpaa2_pmd_init_log(void)
{
	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
	if (dpaa2_logtype_pmd >= 0)
		rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
}