1 /* * SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_ethdev_driver.h> 13 #include <rte_malloc.h> 14 #include <rte_memcpy.h> 15 #include <rte_string_fns.h> 16 #include <rte_cycles.h> 17 #include <rte_kvargs.h> 18 #include <rte_dev.h> 19 #include <rte_fslmc.h> 20 21 #include "dpaa2_pmd_logs.h" 22 #include <fslmc_vfio.h> 23 #include <dpaa2_hw_pvt.h> 24 #include <dpaa2_hw_mempool.h> 25 #include <dpaa2_hw_dpio.h> 26 #include <mc/fsl_dpmng.h> 27 #include "dpaa2_ethdev.h" 28 #include <fsl_qbman_debug.h> 29 30 /* Supported Rx offloads */ 31 static uint64_t dev_rx_offloads_sup = 32 DEV_RX_OFFLOAD_VLAN_STRIP | 33 DEV_RX_OFFLOAD_IPV4_CKSUM | 34 DEV_RX_OFFLOAD_UDP_CKSUM | 35 DEV_RX_OFFLOAD_TCP_CKSUM | 36 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 37 DEV_RX_OFFLOAD_VLAN_FILTER | 38 DEV_RX_OFFLOAD_JUMBO_FRAME; 39 40 /* Rx offloads which cannot be disabled */ 41 static uint64_t dev_rx_offloads_nodis = 42 DEV_RX_OFFLOAD_CRC_STRIP | 43 DEV_RX_OFFLOAD_SCATTER; 44 45 /* Supported Tx offloads */ 46 static uint64_t dev_tx_offloads_sup = 47 DEV_TX_OFFLOAD_VLAN_INSERT | 48 DEV_TX_OFFLOAD_IPV4_CKSUM | 49 DEV_TX_OFFLOAD_UDP_CKSUM | 50 DEV_TX_OFFLOAD_TCP_CKSUM | 51 DEV_TX_OFFLOAD_SCTP_CKSUM | 52 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 53 54 /* Tx offloads which cannot be disabled */ 55 static uint64_t dev_tx_offloads_nodis = 56 DEV_TX_OFFLOAD_MULTI_SEGS | 57 DEV_TX_OFFLOAD_MT_LOCKFREE | 58 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 59 60 struct rte_dpaa2_xstats_name_off { 61 char name[RTE_ETH_XSTATS_NAME_SIZE]; 62 uint8_t page_id; /* dpni statistics page id */ 63 uint8_t stats_id; /* stats id in the given page */ 64 }; 65 66 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { 67 {"ingress_multicast_frames", 0, 2}, 68 {"ingress_multicast_bytes", 0, 3}, 69 {"ingress_broadcast_frames", 0, 4}, 70 {"ingress_broadcast_bytes", 0, 5}, 71 {"egress_multicast_frames", 1, 2}, 72 {"egress_multicast_bytes", 1, 3}, 73 {"egress_broadcast_frames", 1, 4}, 74 {"egress_broadcast_bytes", 1, 5}, 75 {"ingress_filtered_frames", 2, 0}, 76 {"ingress_discarded_frames", 2, 1}, 77 {"ingress_nobuffer_discards", 2, 2}, 78 {"egress_discarded_frames", 2, 3}, 79 {"egress_confirmed_frames", 2, 4}, 80 }; 81 82 static struct rte_dpaa2_driver rte_dpaa2_pmd; 83 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); 84 static int dpaa2_dev_link_update(struct rte_eth_dev *dev, 85 int wait_to_complete); 86 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); 87 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); 88 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 89 90 int dpaa2_logtype_pmd; 91 92 static int 93 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 94 { 95 int ret; 96 struct dpaa2_dev_priv *priv = dev->data->dev_private; 97 struct fsl_mc_io *dpni = priv->hw; 98 99 PMD_INIT_FUNC_TRACE(); 100 101 if (dpni == NULL) { 102 DPAA2_PMD_ERR("dpni is NULL"); 103 return -1; 104 } 105 106 if (on) 107 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, 108 priv->token, vlan_id); 109 else 110 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, 111 priv->token, vlan_id); 112 113 if (ret < 0) 114 DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", 115 ret, vlan_id, priv->hw_id); 116 117 return ret; 118 } 119 120 static int 121 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) 122 { 123 struct dpaa2_dev_priv *priv = 
dev->data->dev_private; 124 struct fsl_mc_io *dpni = priv->hw; 125 int ret; 126 127 PMD_INIT_FUNC_TRACE(); 128 129 if (mask & ETH_VLAN_FILTER_MASK) { 130 /* VLAN Filter not avaialble */ 131 if (!priv->max_vlan_filters) { 132 DPAA2_PMD_INFO("VLAN filter not available"); 133 goto next_mask; 134 } 135 136 if (dev->data->dev_conf.rxmode.offloads & 137 DEV_RX_OFFLOAD_VLAN_FILTER) 138 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 139 priv->token, true); 140 else 141 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 142 priv->token, false); 143 if (ret < 0) 144 DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); 145 } 146 next_mask: 147 if (mask & ETH_VLAN_EXTEND_MASK) { 148 if (dev->data->dev_conf.rxmode.offloads & 149 DEV_RX_OFFLOAD_VLAN_EXTEND) 150 DPAA2_PMD_INFO("VLAN extend offload not supported"); 151 } 152 153 return 0; 154 } 155 156 static int 157 dpaa2_fw_version_get(struct rte_eth_dev *dev, 158 char *fw_version, 159 size_t fw_size) 160 { 161 int ret; 162 struct dpaa2_dev_priv *priv = dev->data->dev_private; 163 struct fsl_mc_io *dpni = priv->hw; 164 struct mc_soc_version mc_plat_info = {0}; 165 struct mc_version mc_ver_info = {0}; 166 167 PMD_INIT_FUNC_TRACE(); 168 169 if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) 170 DPAA2_PMD_WARN("\tmc_get_soc_version failed"); 171 172 if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) 173 DPAA2_PMD_WARN("\tmc_get_version failed"); 174 175 ret = snprintf(fw_version, fw_size, 176 "%x-%d.%d.%d", 177 mc_plat_info.svr, 178 mc_ver_info.major, 179 mc_ver_info.minor, 180 mc_ver_info.revision); 181 182 ret += 1; /* add the size of '\0' */ 183 if (fw_size < (uint32_t)ret) 184 return ret; 185 else 186 return 0; 187 } 188 189 static void 190 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 191 { 192 struct dpaa2_dev_priv *priv = dev->data->dev_private; 193 194 PMD_INIT_FUNC_TRACE(); 195 196 dev_info->if_index = priv->hw_id; 197 198 dev_info->max_mac_addrs = priv->max_mac_filters; 199 dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; 200 dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; 201 dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; 202 dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; 203 dev_info->rx_offload_capa = dev_rx_offloads_sup | 204 dev_rx_offloads_nodis; 205 dev_info->tx_offload_capa = dev_tx_offloads_sup | 206 dev_tx_offloads_nodis; 207 dev_info->speed_capa = ETH_LINK_SPEED_1G | 208 ETH_LINK_SPEED_2_5G | 209 ETH_LINK_SPEED_10G; 210 } 211 212 static int 213 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) 214 { 215 struct dpaa2_dev_priv *priv = dev->data->dev_private; 216 uint16_t dist_idx; 217 uint32_t vq_id; 218 struct dpaa2_queue *mc_q, *mcq; 219 uint32_t tot_queues; 220 int i; 221 struct dpaa2_queue *dpaa2_q; 222 223 PMD_INIT_FUNC_TRACE(); 224 225 tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; 226 mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, 227 RTE_CACHE_LINE_SIZE); 228 if (!mc_q) { 229 DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); 230 return -1; 231 } 232 233 for (i = 0; i < priv->nb_rx_queues; i++) { 234 mc_q->dev = dev; 235 priv->rx_vq[i] = mc_q++; 236 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 237 dpaa2_q->q_storage = rte_malloc("dq_storage", 238 sizeof(struct queue_storage_info_t), 239 RTE_CACHE_LINE_SIZE); 240 if (!dpaa2_q->q_storage) 241 goto fail; 242 243 memset(dpaa2_q->q_storage, 0, 244 sizeof(struct queue_storage_info_t)); 245 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) 246 goto fail; 247 } 248 249 for (i = 0; i < 
priv->nb_tx_queues; i++) { 250 mc_q->dev = dev; 251 mc_q->flow_id = 0xffff; 252 priv->tx_vq[i] = mc_q++; 253 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 254 dpaa2_q->cscn = rte_malloc(NULL, 255 sizeof(struct qbman_result), 16); 256 if (!dpaa2_q->cscn) 257 goto fail_tx; 258 } 259 260 vq_id = 0; 261 for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { 262 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; 263 mcq->tc_index = DPAA2_DEF_TC; 264 mcq->flow_id = dist_idx; 265 vq_id++; 266 } 267 268 return 0; 269 fail_tx: 270 i -= 1; 271 while (i >= 0) { 272 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 273 rte_free(dpaa2_q->cscn); 274 priv->tx_vq[i--] = NULL; 275 } 276 i = priv->nb_rx_queues; 277 fail: 278 i -= 1; 279 mc_q = priv->rx_vq[0]; 280 while (i >= 0) { 281 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 282 dpaa2_free_dq_storage(dpaa2_q->q_storage); 283 rte_free(dpaa2_q->q_storage); 284 priv->rx_vq[i--] = NULL; 285 } 286 rte_free(mc_q); 287 return -1; 288 } 289 290 static int 291 dpaa2_eth_dev_configure(struct rte_eth_dev *dev) 292 { 293 struct dpaa2_dev_priv *priv = dev->data->dev_private; 294 struct fsl_mc_io *dpni = priv->hw; 295 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 296 uint64_t rx_offloads = eth_conf->rxmode.offloads; 297 uint64_t tx_offloads = eth_conf->txmode.offloads; 298 int rx_l3_csum_offload = false; 299 int rx_l4_csum_offload = false; 300 int tx_l3_csum_offload = false; 301 int tx_l4_csum_offload = false; 302 int ret; 303 304 PMD_INIT_FUNC_TRACE(); 305 306 /* Rx offloads validation */ 307 if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) { 308 DPAA2_PMD_ERR( 309 "Rx offloads non supported - requested 0x%" PRIx64 310 " supported 0x%" PRIx64, 311 rx_offloads, 312 dev_rx_offloads_sup | dev_rx_offloads_nodis); 313 return -ENOTSUP; 314 } 315 if (dev_rx_offloads_nodis & ~rx_offloads) { 316 DPAA2_PMD_WARN( 317 "Rx offloads non configurable - requested 0x%" PRIx64 318 " ignored 0x%" PRIx64, 319 rx_offloads, dev_rx_offloads_nodis); 320 } 321 322 /* Tx offloads validation */ 323 if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) { 324 DPAA2_PMD_ERR( 325 "Tx offloads non supported - requested 0x%" PRIx64 326 " supported 0x%" PRIx64, 327 tx_offloads, 328 dev_tx_offloads_sup | dev_tx_offloads_nodis); 329 return -ENOTSUP; 330 } 331 if (dev_tx_offloads_nodis & ~tx_offloads) { 332 DPAA2_PMD_WARN( 333 "Tx offloads non configurable - requested 0x%" PRIx64 334 " ignored 0x%" PRIx64, 335 tx_offloads, dev_tx_offloads_nodis); 336 } 337 338 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 339 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { 340 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, 341 priv->token, eth_conf->rxmode.max_rx_pkt_len); 342 if (ret) { 343 DPAA2_PMD_ERR( 344 "Unable to set mtu. check config"); 345 return ret; 346 } 347 } else { 348 return -1; 349 } 350 } 351 352 if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { 353 ret = dpaa2_setup_flow_dist(dev, 354 eth_conf->rx_adv_conf.rss_conf.rss_hf); 355 if (ret) { 356 DPAA2_PMD_ERR("Unable to set flow distribution." 
357 "Check queue config"); 358 return ret; 359 } 360 } 361 362 if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) 363 rx_l3_csum_offload = true; 364 365 if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || 366 (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) 367 rx_l4_csum_offload = true; 368 369 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 370 DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); 371 if (ret) { 372 DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); 373 return ret; 374 } 375 376 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 377 DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); 378 if (ret) { 379 DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); 380 return ret; 381 } 382 383 if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) 384 tx_l3_csum_offload = true; 385 386 if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || 387 (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || 388 (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) 389 tx_l4_csum_offload = true; 390 391 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 392 DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); 393 if (ret) { 394 DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); 395 return ret; 396 } 397 398 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 399 DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); 400 if (ret) { 401 DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); 402 return ret; 403 } 404 405 /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in 406 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC] 407 * to 0 for LS2 in the hardware thus disabling data/annotation 408 * stashing. For LX2 this is fixed in hardware and thus hash result and 409 * parse results can be received in FD using this option. 410 */ 411 if (dpaa2_svr_family == SVR_LX2160A) { 412 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 413 DPNI_FLCTYPE_HASH, true); 414 if (ret) { 415 DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); 416 return ret; 417 } 418 } 419 420 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); 421 422 /* update the current status */ 423 dpaa2_dev_link_update(dev, 0); 424 425 return 0; 426 } 427 428 /* Function to setup RX flow information. It contains traffic class ID, 429 * flow ID, destination configuration etc. 430 */ 431 static int 432 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, 433 uint16_t rx_queue_id, 434 uint16_t nb_rx_desc __rte_unused, 435 unsigned int socket_id __rte_unused, 436 const struct rte_eth_rxconf *rx_conf __rte_unused, 437 struct rte_mempool *mb_pool) 438 { 439 struct dpaa2_dev_priv *priv = dev->data->dev_private; 440 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 441 struct dpaa2_queue *dpaa2_q; 442 struct dpni_queue cfg; 443 uint8_t options = 0; 444 uint8_t flow_id; 445 uint32_t bpid; 446 int ret; 447 448 PMD_INIT_FUNC_TRACE(); 449 450 DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", 451 dev, rx_queue_id, mb_pool, rx_conf); 452 453 if (!priv->bp_list || priv->bp_list->mp != mb_pool) { 454 bpid = mempool_to_bpid(mb_pool); 455 ret = dpaa2_attach_bp_list(priv, 456 rte_dpaa2_bpid_info[bpid].bp_list); 457 if (ret) 458 return ret; 459 } 460 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 461 dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. 
*/ 462 463 /*Get the flow id from given VQ id*/ 464 flow_id = rx_queue_id % priv->nb_rx_queues; 465 memset(&cfg, 0, sizeof(struct dpni_queue)); 466 467 options = options | DPNI_QUEUE_OPT_USER_CTX; 468 cfg.user_context = (size_t)(dpaa2_q); 469 470 /*if ls2088 or rev2 device, enable the stashing */ 471 472 if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { 473 options |= DPNI_QUEUE_OPT_FLC; 474 cfg.flc.stash_control = true; 475 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; 476 /* 00 00 00 - last 6 bit represent annotation, context stashing, 477 * data stashing setting 01 01 00 (0x14) 478 * (in following order ->DS AS CS) 479 * to enable 1 line data, 1 line annotation. 480 * For LX2, this setting should be 01 00 00 (0x10) 481 */ 482 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) 483 cfg.flc.value |= 0x10; 484 else 485 cfg.flc.value |= 0x14; 486 } 487 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, 488 dpaa2_q->tc_index, flow_id, options, &cfg); 489 if (ret) { 490 DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); 491 return -1; 492 } 493 494 if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { 495 struct dpni_taildrop taildrop; 496 497 taildrop.enable = 1; 498 /*enabling per rx queue congestion control */ 499 taildrop.threshold = CONG_THRESHOLD_RX_Q; 500 taildrop.units = DPNI_CONGESTION_UNIT_BYTES; 501 taildrop.oal = CONG_RX_OAL; 502 DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", 503 rx_queue_id); 504 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 505 DPNI_CP_QUEUE, DPNI_QUEUE_RX, 506 dpaa2_q->tc_index, flow_id, &taildrop); 507 if (ret) { 508 DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", 509 ret); 510 return -1; 511 } 512 } 513 514 dev->data->rx_queues[rx_queue_id] = dpaa2_q; 515 return 0; 516 } 517 518 static int 519 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 520 uint16_t tx_queue_id, 521 uint16_t nb_tx_desc __rte_unused, 522 unsigned int socket_id __rte_unused, 523 const struct rte_eth_txconf *tx_conf __rte_unused) 524 { 525 struct dpaa2_dev_priv *priv = dev->data->dev_private; 526 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 527 priv->tx_vq[tx_queue_id]; 528 struct fsl_mc_io *dpni = priv->hw; 529 struct dpni_queue tx_conf_cfg; 530 struct dpni_queue tx_flow_cfg; 531 uint8_t options = 0, flow_id; 532 uint32_t tc_id; 533 int ret; 534 535 PMD_INIT_FUNC_TRACE(); 536 537 /* Return if queue already configured */ 538 if (dpaa2_q->flow_id != 0xffff) { 539 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 540 return 0; 541 } 542 543 memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 544 memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 545 546 tc_id = tx_queue_id; 547 flow_id = 0; 548 549 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, 550 tc_id, flow_id, options, &tx_flow_cfg); 551 if (ret) { 552 DPAA2_PMD_ERR("Error in setting the tx flow: " 553 "tc_id=%d, flow=%d err=%d", 554 tc_id, flow_id, ret); 555 return -1; 556 } 557 558 dpaa2_q->flow_id = flow_id; 559 560 if (tx_queue_id == 0) { 561 /*Set tx-conf and error configuration*/ 562 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 563 priv->token, 564 DPNI_CONF_DISABLE); 565 if (ret) { 566 DPAA2_PMD_ERR("Error in set tx conf mode settings: " 567 "err=%d", ret); 568 return -1; 569 } 570 } 571 dpaa2_q->tc_index = tc_id; 572 573 if (!(priv->flags & DPAA2_TX_CGR_OFF)) { 574 struct dpni_congestion_notification_cfg cong_notif_cfg; 575 576 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; 577 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; 578 /* Notify that the 
queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *    The address of parameter (struct rte_eth_dev *) registered before.
676 * 677 * @return 678 * void 679 */ 680 static void 681 dpaa2_interrupt_handler(void *param) 682 { 683 struct rte_eth_dev *dev = param; 684 struct dpaa2_dev_priv *priv = dev->data->dev_private; 685 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 686 int ret; 687 int irq_index = DPNI_IRQ_INDEX; 688 unsigned int status = 0, clear = 0; 689 690 PMD_INIT_FUNC_TRACE(); 691 692 if (dpni == NULL) { 693 DPAA2_PMD_ERR("dpni is NULL"); 694 return; 695 } 696 697 ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 698 irq_index, &status); 699 if (unlikely(ret)) { 700 DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); 701 clear = 0xffffffff; 702 goto out; 703 } 704 705 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 706 clear = DPNI_IRQ_EVENT_LINK_CHANGED; 707 dpaa2_dev_link_update(dev, 0); 708 /* calling all the apps registered for link status event */ 709 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 710 NULL); 711 } 712 out: 713 ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 714 irq_index, clear); 715 if (unlikely(ret)) 716 DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); 717 } 718 719 static int 720 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 721 { 722 int err = 0; 723 struct dpaa2_dev_priv *priv = dev->data->dev_private; 724 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 725 int irq_index = DPNI_IRQ_INDEX; 726 unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 727 728 PMD_INIT_FUNC_TRACE(); 729 730 err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 731 irq_index, mask); 732 if (err < 0) { 733 DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, 734 strerror(-err)); 735 return err; 736 } 737 738 err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 739 irq_index, enable); 740 if (err < 0) 741 DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, 742 strerror(-err)); 743 744 return err; 745 } 746 747 static int 748 dpaa2_dev_start(struct rte_eth_dev *dev) 749 { 750 struct rte_device *rdev = dev->device; 751 struct rte_dpaa2_device *dpaa2_dev; 752 struct rte_eth_dev_data *data = dev->data; 753 struct dpaa2_dev_priv *priv = data->dev_private; 754 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 755 struct dpni_queue cfg; 756 struct dpni_error_cfg err_cfg; 757 uint16_t qdid; 758 struct dpni_queue_id qid; 759 struct dpaa2_queue *dpaa2_q; 760 int ret, i; 761 struct rte_intr_handle *intr_handle; 762 763 dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 764 intr_handle = &dpaa2_dev->intr_handle; 765 766 PMD_INIT_FUNC_TRACE(); 767 768 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 769 if (ret) { 770 DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", 771 priv->hw_id, ret); 772 return ret; 773 } 774 775 /* Power up the phy. 
Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/*checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum
Ethernet header length 1035 */ 1036 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 1037 frame_size); 1038 if (ret) { 1039 DPAA2_PMD_ERR("Setting the max frame length failed"); 1040 return -1; 1041 } 1042 DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); 1043 return 0; 1044 } 1045 1046 static int 1047 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 1048 struct ether_addr *addr, 1049 __rte_unused uint32_t index, 1050 __rte_unused uint32_t pool) 1051 { 1052 int ret; 1053 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1054 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1055 1056 PMD_INIT_FUNC_TRACE(); 1057 1058 if (dpni == NULL) { 1059 DPAA2_PMD_ERR("dpni is NULL"); 1060 return -1; 1061 } 1062 1063 ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, 1064 priv->token, addr->addr_bytes); 1065 if (ret) 1066 DPAA2_PMD_ERR( 1067 "error: Adding the MAC ADDR failed: err = %d", ret); 1068 return 0; 1069 } 1070 1071 static void 1072 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 1073 uint32_t index) 1074 { 1075 int ret; 1076 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1077 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1078 struct rte_eth_dev_data *data = dev->data; 1079 struct ether_addr *macaddr; 1080 1081 PMD_INIT_FUNC_TRACE(); 1082 1083 macaddr = &data->mac_addrs[index]; 1084 1085 if (dpni == NULL) { 1086 DPAA2_PMD_ERR("dpni is NULL"); 1087 return; 1088 } 1089 1090 ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 1091 priv->token, macaddr->addr_bytes); 1092 if (ret) 1093 DPAA2_PMD_ERR( 1094 "error: Removing the MAC ADDR failed: err = %d", ret); 1095 } 1096 1097 static int 1098 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 1099 struct ether_addr *addr) 1100 { 1101 int ret; 1102 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1103 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1104 1105 PMD_INIT_FUNC_TRACE(); 1106 1107 if (dpni == NULL) { 1108 DPAA2_PMD_ERR("dpni is NULL"); 1109 return -EINVAL; 1110 } 1111 1112 ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 1113 priv->token, addr->addr_bytes); 1114 1115 if (ret) 1116 DPAA2_PMD_ERR( 1117 "error: Setting the MAC ADDR failed %d", ret); 1118 1119 return ret; 1120 } 1121 1122 static 1123 int dpaa2_dev_stats_get(struct rte_eth_dev *dev, 1124 struct rte_eth_stats *stats) 1125 { 1126 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1127 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1128 int32_t retcode; 1129 uint8_t page0 = 0, page1 = 1, page2 = 2; 1130 union dpni_statistics value; 1131 1132 memset(&value, 0, sizeof(union dpni_statistics)); 1133 1134 PMD_INIT_FUNC_TRACE(); 1135 1136 if (!dpni) { 1137 DPAA2_PMD_ERR("dpni is NULL"); 1138 return -EINVAL; 1139 } 1140 1141 if (!stats) { 1142 DPAA2_PMD_ERR("stats is NULL"); 1143 return -EINVAL; 1144 } 1145 1146 /*Get Counters from page_0*/ 1147 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1148 page0, 0, &value); 1149 if (retcode) 1150 goto err; 1151 1152 stats->ipackets = value.page_0.ingress_all_frames; 1153 stats->ibytes = value.page_0.ingress_all_bytes; 1154 1155 /*Get Counters from page_1*/ 1156 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1157 page1, 0, &value); 1158 if (retcode) 1159 goto err; 1160 1161 stats->opackets = value.page_1.egress_all_frames; 1162 stats->obytes = value.page_1.egress_all_bytes; 1163 1164 /*Get Counters from page_2*/ 1165 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1166 page2, 0, &value); 1167 if (retcode) 1168 goto err; 1169 1170 /* Ingress drop 
frame count due to configured rules */ 1171 stats->ierrors = value.page_2.ingress_filtered_frames; 1172 /* Ingress drop frame count due to error */ 1173 stats->ierrors += value.page_2.ingress_discarded_frames; 1174 1175 stats->oerrors = value.page_2.egress_discarded_frames; 1176 stats->imissed = value.page_2.ingress_nobuffer_discards; 1177 1178 return 0; 1179 1180 err: 1181 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1182 return retcode; 1183 }; 1184 1185 static int 1186 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1187 unsigned int n) 1188 { 1189 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1190 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1191 int32_t retcode; 1192 union dpni_statistics value[3] = {}; 1193 unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 1194 1195 if (n < num) 1196 return num; 1197 1198 if (xstats == NULL) 1199 return 0; 1200 1201 /* Get Counters from page_0*/ 1202 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1203 0, 0, &value[0]); 1204 if (retcode) 1205 goto err; 1206 1207 /* Get Counters from page_1*/ 1208 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1209 1, 0, &value[1]); 1210 if (retcode) 1211 goto err; 1212 1213 /* Get Counters from page_2*/ 1214 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1215 2, 0, &value[2]); 1216 if (retcode) 1217 goto err; 1218 1219 for (i = 0; i < num; i++) { 1220 xstats[i].id = i; 1221 xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 1222 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1223 } 1224 return i; 1225 err: 1226 DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); 1227 return retcode; 1228 } 1229 1230 static int 1231 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1232 struct rte_eth_xstat_name *xstats_names, 1233 unsigned int limit) 1234 { 1235 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1236 1237 if (limit < stat_cnt) 1238 return stat_cnt; 1239 1240 if (xstats_names != NULL) 1241 for (i = 0; i < stat_cnt; i++) 1242 snprintf(xstats_names[i].name, 1243 sizeof(xstats_names[i].name), 1244 "%s", 1245 dpaa2_xstats_strings[i].name); 1246 1247 return stat_cnt; 1248 } 1249 1250 static int 1251 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1252 uint64_t *values, unsigned int n) 1253 { 1254 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1255 uint64_t values_copy[stat_cnt]; 1256 1257 if (!ids) { 1258 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1259 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1260 int32_t retcode; 1261 union dpni_statistics value[3] = {}; 1262 1263 if (n < stat_cnt) 1264 return stat_cnt; 1265 1266 if (!values) 1267 return 0; 1268 1269 /* Get Counters from page_0*/ 1270 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1271 0, 0, &value[0]); 1272 if (retcode) 1273 return 0; 1274 1275 /* Get Counters from page_1*/ 1276 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1277 1, 0, &value[1]); 1278 if (retcode) 1279 return 0; 1280 1281 /* Get Counters from page_2*/ 1282 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1283 2, 0, &value[2]); 1284 if (retcode) 1285 return 0; 1286 1287 for (i = 0; i < stat_cnt; i++) { 1288 values[i] = value[dpaa2_xstats_strings[i].page_id]. 
1289 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1290 } 1291 return stat_cnt; 1292 } 1293 1294 dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 1295 1296 for (i = 0; i < n; i++) { 1297 if (ids[i] >= stat_cnt) { 1298 DPAA2_PMD_ERR("xstats id value isn't valid"); 1299 return -1; 1300 } 1301 values[i] = values_copy[ids[i]]; 1302 } 1303 return n; 1304 } 1305 1306 static int 1307 dpaa2_xstats_get_names_by_id( 1308 struct rte_eth_dev *dev, 1309 struct rte_eth_xstat_name *xstats_names, 1310 const uint64_t *ids, 1311 unsigned int limit) 1312 { 1313 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1314 struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 1315 1316 if (!ids) 1317 return dpaa2_xstats_get_names(dev, xstats_names, limit); 1318 1319 dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 1320 1321 for (i = 0; i < limit; i++) { 1322 if (ids[i] >= stat_cnt) { 1323 DPAA2_PMD_ERR("xstats id value isn't valid"); 1324 return -1; 1325 } 1326 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 1327 } 1328 return limit; 1329 } 1330 1331 static void 1332 dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1333 { 1334 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1335 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1336 int32_t retcode; 1337 1338 PMD_INIT_FUNC_TRACE(); 1339 1340 if (dpni == NULL) { 1341 DPAA2_PMD_ERR("dpni is NULL"); 1342 return; 1343 } 1344 1345 retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1346 if (retcode) 1347 goto error; 1348 1349 return; 1350 1351 error: 1352 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1353 return; 1354 }; 1355 1356 /* return 0 means link status changed, -1 means not changed */ 1357 static int 1358 dpaa2_dev_link_update(struct rte_eth_dev *dev, 1359 int wait_to_complete __rte_unused) 1360 { 1361 int ret; 1362 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1363 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1364 struct rte_eth_link link; 1365 struct dpni_link_state state = {0}; 1366 1367 if (dpni == NULL) { 1368 DPAA2_PMD_ERR("dpni is NULL"); 1369 return 0; 1370 } 1371 1372 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1373 if (ret < 0) { 1374 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 1375 return -1; 1376 } 1377 1378 memset(&link, 0, sizeof(struct rte_eth_link)); 1379 link.link_status = state.up; 1380 link.link_speed = state.rate; 1381 1382 if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1383 link.link_duplex = ETH_LINK_HALF_DUPLEX; 1384 else 1385 link.link_duplex = ETH_LINK_FULL_DUPLEX; 1386 1387 ret = rte_eth_linkstatus_set(dev, &link); 1388 if (ret == -1) 1389 DPAA2_PMD_DEBUG("No change in status"); 1390 else 1391 DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, 1392 link.link_status ? "Up" : "Down"); 1393 1394 return ret; 1395 } 1396 1397 /** 1398 * Toggle the DPNI to enable, if not already enabled. 1399 * This is not strictly PHY up/down - it is more of logical toggling. 
1400 */ 1401 static int 1402 dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1403 { 1404 int ret = -EINVAL; 1405 struct dpaa2_dev_priv *priv; 1406 struct fsl_mc_io *dpni; 1407 int en = 0; 1408 struct dpni_link_state state = {0}; 1409 1410 priv = dev->data->dev_private; 1411 dpni = (struct fsl_mc_io *)priv->hw; 1412 1413 if (dpni == NULL) { 1414 DPAA2_PMD_ERR("dpni is NULL"); 1415 return ret; 1416 } 1417 1418 /* Check if DPNI is currently enabled */ 1419 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1420 if (ret) { 1421 /* Unable to obtain dpni status; Not continuing */ 1422 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1423 return -EINVAL; 1424 } 1425 1426 /* Enable link if not already enabled */ 1427 if (!en) { 1428 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1429 if (ret) { 1430 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1431 return -EINVAL; 1432 } 1433 } 1434 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1435 if (ret < 0) { 1436 DPAA2_PMD_ERR("Unable to get link state (%d)", ret); 1437 return -1; 1438 } 1439 1440 /* changing tx burst function to start enqueues */ 1441 dev->tx_pkt_burst = dpaa2_dev_tx; 1442 dev->data->dev_link.link_status = state.up; 1443 1444 if (state.up) 1445 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); 1446 else 1447 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); 1448 return ret; 1449 } 1450 1451 /** 1452 * Toggle the DPNI to disable, if not already disabled. 1453 * This is not strictly PHY up/down - it is more of logical toggling. 1454 */ 1455 static int 1456 dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 1457 { 1458 int ret = -EINVAL; 1459 struct dpaa2_dev_priv *priv; 1460 struct fsl_mc_io *dpni; 1461 int dpni_enabled = 0; 1462 int retries = 10; 1463 1464 PMD_INIT_FUNC_TRACE(); 1465 1466 priv = dev->data->dev_private; 1467 dpni = (struct fsl_mc_io *)priv->hw; 1468 1469 if (dpni == NULL) { 1470 DPAA2_PMD_ERR("Device has not yet been configured"); 1471 return ret; 1472 } 1473 1474 /*changing tx burst function to avoid any more enqueues */ 1475 dev->tx_pkt_burst = dummy_dev_tx; 1476 1477 /* Loop while dpni_disable() attempts to drain the egress FQs 1478 * and confirm them back to us. 1479 */ 1480 do { 1481 ret = dpni_disable(dpni, 0, priv->token); 1482 if (ret) { 1483 DPAA2_PMD_ERR("dpni disable failed (%d)", ret); 1484 return ret; 1485 } 1486 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 1487 if (ret) { 1488 DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); 1489 return ret; 1490 } 1491 if (dpni_enabled) 1492 /* Allow the MC some slack */ 1493 rte_delay_us(100 * 1000); 1494 } while (dpni_enabled && --retries); 1495 1496 if (!retries) { 1497 DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); 1498 /* todo- we may have to manually cleanup queues. 
1499 */ 1500 } else { 1501 DPAA2_PMD_INFO("Port %d Link DOWN successful", 1502 dev->data->port_id); 1503 } 1504 1505 dev->data->dev_link.link_status = 0; 1506 1507 return ret; 1508 } 1509 1510 static int 1511 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1512 { 1513 int ret = -EINVAL; 1514 struct dpaa2_dev_priv *priv; 1515 struct fsl_mc_io *dpni; 1516 struct dpni_link_state state = {0}; 1517 1518 PMD_INIT_FUNC_TRACE(); 1519 1520 priv = dev->data->dev_private; 1521 dpni = (struct fsl_mc_io *)priv->hw; 1522 1523 if (dpni == NULL || fc_conf == NULL) { 1524 DPAA2_PMD_ERR("device not configured"); 1525 return ret; 1526 } 1527 1528 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1529 if (ret) { 1530 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 1531 return ret; 1532 } 1533 1534 memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 1535 if (state.options & DPNI_LINK_OPT_PAUSE) { 1536 /* DPNI_LINK_OPT_PAUSE set 1537 * if ASYM_PAUSE not set, 1538 * RX Side flow control (handle received Pause frame) 1539 * TX side flow control (send Pause frame) 1540 * if ASYM_PAUSE set, 1541 * RX Side flow control (handle received Pause frame) 1542 * No TX side flow control (send Pause frame disabled) 1543 */ 1544 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 1545 fc_conf->mode = RTE_FC_FULL; 1546 else 1547 fc_conf->mode = RTE_FC_RX_PAUSE; 1548 } else { 1549 /* DPNI_LINK_OPT_PAUSE not set 1550 * if ASYM_PAUSE set, 1551 * TX side flow control (send Pause frame) 1552 * No RX side flow control (No action on pause frame rx) 1553 * if ASYM_PAUSE not set, 1554 * Flow control disabled 1555 */ 1556 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 1557 fc_conf->mode = RTE_FC_TX_PAUSE; 1558 else 1559 fc_conf->mode = RTE_FC_NONE; 1560 } 1561 1562 return ret; 1563 } 1564 1565 static int 1566 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1567 { 1568 int ret = -EINVAL; 1569 struct dpaa2_dev_priv *priv; 1570 struct fsl_mc_io *dpni; 1571 struct dpni_link_state state = {0}; 1572 struct dpni_link_cfg cfg = {0}; 1573 1574 PMD_INIT_FUNC_TRACE(); 1575 1576 priv = dev->data->dev_private; 1577 dpni = (struct fsl_mc_io *)priv->hw; 1578 1579 if (dpni == NULL) { 1580 DPAA2_PMD_ERR("dpni is NULL"); 1581 return ret; 1582 } 1583 1584 /* It is necessary to obtain the current state before setting fc_conf 1585 * as MC would return error in case rate, autoneg or duplex values are 1586 * different. 
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable TX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
			      fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
			      ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			DPAA2_PMD_ERR("Unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else
		return -EINVAL;

	memset(&cfg, 0,
sizeof(struct dpni_queue)); 1712 options = DPNI_QUEUE_OPT_DEST; 1713 cfg.destination.type = DPNI_DEST_DPCON; 1714 cfg.destination.id = dpcon_id; 1715 cfg.destination.priority = queue_conf->ev.priority; 1716 1717 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { 1718 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; 1719 cfg.destination.hold_active = 1; 1720 } 1721 1722 options |= DPNI_QUEUE_OPT_USER_CTX; 1723 cfg.user_context = (size_t)(dpaa2_ethq); 1724 1725 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1726 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1727 if (ret) { 1728 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 1729 return ret; 1730 } 1731 1732 memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 1733 1734 return 0; 1735 } 1736 1737 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 1738 int eth_rx_queue_id) 1739 { 1740 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 1741 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; 1742 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 1743 uint8_t flow_id = dpaa2_ethq->flow_id; 1744 struct dpni_queue cfg; 1745 uint8_t options; 1746 int ret; 1747 1748 memset(&cfg, 0, sizeof(struct dpni_queue)); 1749 options = DPNI_QUEUE_OPT_DEST; 1750 cfg.destination.type = DPNI_DEST_NONE; 1751 1752 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1753 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1754 if (ret) 1755 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 1756 1757 return ret; 1758 } 1759 1760 static struct eth_dev_ops dpaa2_ethdev_ops = { 1761 .dev_configure = dpaa2_eth_dev_configure, 1762 .dev_start = dpaa2_dev_start, 1763 .dev_stop = dpaa2_dev_stop, 1764 .dev_close = dpaa2_dev_close, 1765 .promiscuous_enable = dpaa2_dev_promiscuous_enable, 1766 .promiscuous_disable = dpaa2_dev_promiscuous_disable, 1767 .allmulticast_enable = dpaa2_dev_allmulticast_enable, 1768 .allmulticast_disable = dpaa2_dev_allmulticast_disable, 1769 .dev_set_link_up = dpaa2_dev_set_link_up, 1770 .dev_set_link_down = dpaa2_dev_set_link_down, 1771 .link_update = dpaa2_dev_link_update, 1772 .stats_get = dpaa2_dev_stats_get, 1773 .xstats_get = dpaa2_dev_xstats_get, 1774 .xstats_get_by_id = dpaa2_xstats_get_by_id, 1775 .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 1776 .xstats_get_names = dpaa2_xstats_get_names, 1777 .stats_reset = dpaa2_dev_stats_reset, 1778 .xstats_reset = dpaa2_dev_stats_reset, 1779 .fw_version_get = dpaa2_fw_version_get, 1780 .dev_infos_get = dpaa2_dev_info_get, 1781 .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 1782 .mtu_set = dpaa2_dev_mtu_set, 1783 .vlan_filter_set = dpaa2_vlan_filter_set, 1784 .vlan_offload_set = dpaa2_vlan_offload_set, 1785 .rx_queue_setup = dpaa2_dev_rx_queue_setup, 1786 .rx_queue_release = dpaa2_dev_rx_queue_release, 1787 .tx_queue_setup = dpaa2_dev_tx_queue_setup, 1788 .tx_queue_release = dpaa2_dev_tx_queue_release, 1789 .rx_queue_count = dpaa2_dev_rx_queue_count, 1790 .flow_ctrl_get = dpaa2_flow_ctrl_get, 1791 .flow_ctrl_set = dpaa2_flow_ctrl_set, 1792 .mac_addr_add = dpaa2_dev_add_mac_addr, 1793 .mac_addr_remove = dpaa2_dev_remove_mac_addr, 1794 .mac_addr_set = dpaa2_dev_set_mac_addr, 1795 .rss_hash_update = dpaa2_dev_rss_hash_update, 1796 .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 1797 }; 1798 1799 static int 1800 dpaa2_dev_init(struct rte_eth_dev *eth_dev) 1801 { 1802 struct rte_device *dev = eth_dev->device; 1803 struct rte_dpaa2_device *dpaa2_dev; 1804 struct fsl_mc_io *dpni_dev; 
1805 struct dpni_attr attr; 1806 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1807 struct dpni_buffer_layout layout; 1808 int ret, hw_id; 1809 1810 PMD_INIT_FUNC_TRACE(); 1811 1812 /* For secondary processes, the primary has done all the work */ 1813 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1814 return 0; 1815 1816 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 1817 1818 hw_id = dpaa2_dev->object_id; 1819 1820 dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 1821 if (!dpni_dev) { 1822 DPAA2_PMD_ERR("Memory allocation failed for dpni device"); 1823 return -1; 1824 } 1825 1826 dpni_dev->regs = rte_mcp_ptr_list[0]; 1827 ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 1828 if (ret) { 1829 DPAA2_PMD_ERR( 1830 "Failure in opening dpni@%d with err code %d", 1831 hw_id, ret); 1832 rte_free(dpni_dev); 1833 return -1; 1834 } 1835 1836 /* Clean the device first */ 1837 ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 1838 if (ret) { 1839 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", 1840 hw_id, ret); 1841 goto init_err; 1842 } 1843 1844 ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 1845 if (ret) { 1846 DPAA2_PMD_ERR( 1847 "Failure in get dpni@%d attribute, err code %d", 1848 hw_id, ret); 1849 goto init_err; 1850 } 1851 1852 priv->num_rx_tc = attr.num_rx_tcs; 1853 1854 /* Resetting the "num_rx_queues" to equal number of queues in first TC 1855 * as only one TC is supported on Rx Side. Once Multiple TCs will be 1856 * in use for Rx processing then this will be changed or removed. 1857 */ 1858 priv->nb_rx_queues = attr.num_queues; 1859 1860 /* Using number of TX queues as number of TX TCs */ 1861 priv->nb_tx_queues = attr.num_tx_tcs; 1862 1863 DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", 1864 priv->num_rx_tc, priv->nb_rx_queues, 1865 priv->nb_tx_queues); 1866 1867 priv->hw = dpni_dev; 1868 priv->hw_id = hw_id; 1869 priv->options = attr.options; 1870 priv->max_mac_filters = attr.mac_filter_entries; 1871 priv->max_vlan_filters = attr.vlan_filter_entries; 1872 priv->flags = 0; 1873 1874 /* Allocate memory for hardware structure for queues */ 1875 ret = dpaa2_alloc_rx_tx_queues(eth_dev); 1876 if (ret) { 1877 DPAA2_PMD_ERR("Queue allocation Failed"); 1878 goto init_err; 1879 } 1880 1881 /* Allocate memory for storing MAC addresses */ 1882 eth_dev->data->mac_addrs = rte_zmalloc("dpni", 1883 ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 1884 if (eth_dev->data->mac_addrs == NULL) { 1885 DPAA2_PMD_ERR( 1886 "Failed to allocate %d bytes needed to store MAC addresses", 1887 ETHER_ADDR_LEN * attr.mac_filter_entries); 1888 ret = -ENOMEM; 1889 goto init_err; 1890 } 1891 1892 ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 1893 priv->token, 1894 (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); 1895 if (ret) { 1896 DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d", 1897 ret); 1898 goto init_err; 1899 } 1900 1901 /* ... tx buffer layout ... */ 1902 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 1903 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 1904 layout.pass_frame_status = 1; 1905 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 1906 DPNI_QUEUE_TX, &layout); 1907 if (ret) { 1908 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); 1909 goto init_err; 1910 } 1911 1912 /* ... tx-conf and error buffer layout ... 
*/ 1913 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 1914 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 1915 layout.pass_frame_status = 1; 1916 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 1917 DPNI_QUEUE_TX_CONFIRM, &layout); 1918 if (ret) { 1919 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", 1920 ret); 1921 goto init_err; 1922 } 1923 1924 eth_dev->dev_ops = &dpaa2_ethdev_ops; 1925 1926 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 1927 eth_dev->tx_pkt_burst = dpaa2_dev_tx; 1928 1929 DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name); 1930 return 0; 1931 init_err: 1932 dpaa2_dev_uninit(eth_dev); 1933 return ret; 1934 } 1935 1936 static int 1937 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) 1938 { 1939 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1940 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1941 int i, ret; 1942 struct dpaa2_queue *dpaa2_q; 1943 1944 PMD_INIT_FUNC_TRACE(); 1945 1946 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1947 return 0; 1948 1949 if (!dpni) { 1950 DPAA2_PMD_WARN("Already closed or not started"); 1951 return -1; 1952 } 1953 1954 dpaa2_dev_close(eth_dev); 1955 1956 if (priv->rx_vq[0]) { 1957 /* cleaning up queue storage */ 1958 for (i = 0; i < priv->nb_rx_queues; i++) { 1959 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1960 if (dpaa2_q->q_storage) 1961 rte_free(dpaa2_q->q_storage); 1962 } 1963 /*free the all queue memory */ 1964 rte_free(priv->rx_vq[0]); 1965 priv->rx_vq[0] = NULL; 1966 } 1967 1968 /* free memory for storing MAC addresses */ 1969 if (eth_dev->data->mac_addrs) { 1970 rte_free(eth_dev->data->mac_addrs); 1971 eth_dev->data->mac_addrs = NULL; 1972 } 1973 1974 /* Close the device at underlying layer*/ 1975 ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 1976 if (ret) { 1977 DPAA2_PMD_ERR( 1978 "Failure closing dpni device with err code %d", 1979 ret); 1980 } 1981 1982 /* Free the allocated memory for ethernet private data and dpni*/ 1983 priv->hw = NULL; 1984 rte_free(dpni); 1985 1986 eth_dev->dev_ops = NULL; 1987 eth_dev->rx_pkt_burst = NULL; 1988 eth_dev->tx_pkt_burst = NULL; 1989 1990 DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); 1991 return 0; 1992 } 1993 1994 static int 1995 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 1996 struct rte_dpaa2_device *dpaa2_dev) 1997 { 1998 struct rte_eth_dev *eth_dev; 1999 int diag; 2000 2001 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2002 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); 2003 if (!eth_dev) 2004 return -ENODEV; 2005 eth_dev->data->dev_private = rte_zmalloc( 2006 "ethdev private structure", 2007 sizeof(struct dpaa2_dev_priv), 2008 RTE_CACHE_LINE_SIZE); 2009 if (eth_dev->data->dev_private == NULL) { 2010 DPAA2_PMD_CRIT( 2011 "Unable to allocate memory for private data"); 2012 rte_eth_dev_release_port(eth_dev); 2013 return -ENOMEM; 2014 } 2015 } else { 2016 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); 2017 if (!eth_dev) 2018 return -ENODEV; 2019 } 2020 2021 eth_dev->device = &dpaa2_dev->device; 2022 eth_dev->device->driver = &dpaa2_drv->driver; 2023 2024 dpaa2_dev->eth_dev = eth_dev; 2025 eth_dev->data->rx_mbuf_alloc_failed = 0; 2026 2027 if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) 2028 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 2029 2030 /* Invoke PMD device initialization function */ 2031 diag = dpaa2_dev_init(eth_dev); 2032 if (diag == 0) 2033 return 0; 2034 2035 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2036 
rte_free(eth_dev->data->dev_private); 2037 rte_eth_dev_release_port(eth_dev); 2038 return diag; 2039 } 2040 2041 static int 2042 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 2043 { 2044 struct rte_eth_dev *eth_dev; 2045 2046 eth_dev = dpaa2_dev->eth_dev; 2047 dpaa2_dev_uninit(eth_dev); 2048 2049 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2050 rte_free(eth_dev->data->dev_private); 2051 rte_eth_dev_release_port(eth_dev); 2052 2053 return 0; 2054 } 2055 2056 static struct rte_dpaa2_driver rte_dpaa2_pmd = { 2057 .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 2058 .drv_type = DPAA2_ETH, 2059 .probe = rte_dpaa2_probe, 2060 .remove = rte_dpaa2_remove, 2061 }; 2062 2063 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); 2064 2065 RTE_INIT(dpaa2_pmd_init_log); 2066 static void 2067 dpaa2_pmd_init_log(void) 2068 { 2069 dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); 2070 if (dpaa2_logtype_pmd >= 0) 2071 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); 2072 } 2073
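
/*
 * Usage note (illustrative only, not part of the driver): the eth_dev_ops
 * table above is exercised through the generic rte_ethdev API. The sketch
 * below shows how an application would typically bring up a dpaa2 port; the
 * port id, descriptor counts, socket id and mempool name ("mbuf_pool") are
 * hypothetical values chosen for illustration.
 *
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	bring_up_port(uint16_t port_id, struct rte_mempool *mbuf_pool)
 *	{
 *		struct rte_eth_conf conf = {
 *			.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		};
 *		int ret;
 *
 *		// Invokes dpaa2_eth_dev_configure(): offload validation,
 *		// max frame length and RSS flow distribution setup.
 *		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *		if (ret < 0)
 *			return ret;
 *
 *		// Invokes dpaa2_dev_rx_queue_setup(): attaches the mempool's
 *		// buffer pool and programs the Rx flow and taildrop.
 *		ret = rte_eth_rx_queue_setup(port_id, 0, 512, 0, NULL,
 *					     mbuf_pool);
 *		if (ret < 0)
 *			return ret;
 *
 *		// Invokes dpaa2_dev_tx_queue_setup(): one TC per Tx queue,
 *		// optional congestion notification.
 *		ret = rte_eth_tx_queue_setup(port_id, 0, 512, 0, NULL);
 *		if (ret < 0)
 *			return ret;
 *
 *		// Invokes dpaa2_dev_start(): enables the DPNI, resolves the
 *		// Rx FQIDs and hooks the LSC interrupt if requested.
 *		return rte_eth_dev_start(port_id);
 *	}
 */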