/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid = %d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
129 size_t fw_size) 130 { 131 int ret; 132 struct dpaa2_dev_priv *priv = dev->data->dev_private; 133 struct fsl_mc_io *dpni = priv->hw; 134 struct mc_soc_version mc_plat_info = {0}; 135 struct mc_version mc_ver_info = {0}; 136 137 PMD_INIT_FUNC_TRACE(); 138 139 if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) 140 DPAA2_PMD_WARN("\tmc_get_soc_version failed"); 141 142 if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) 143 DPAA2_PMD_WARN("\tmc_get_version failed"); 144 145 ret = snprintf(fw_version, fw_size, 146 "%x-%d.%d.%d", 147 mc_plat_info.svr, 148 mc_ver_info.major, 149 mc_ver_info.minor, 150 mc_ver_info.revision); 151 152 ret += 1; /* add the size of '\0' */ 153 if (fw_size < (uint32_t)ret) 154 return ret; 155 else 156 return 0; 157 } 158 159 static void 160 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 161 { 162 struct dpaa2_dev_priv *priv = dev->data->dev_private; 163 164 PMD_INIT_FUNC_TRACE(); 165 166 dev_info->if_index = priv->hw_id; 167 168 dev_info->max_mac_addrs = priv->max_mac_filters; 169 dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; 170 dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; 171 dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; 172 dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; 173 dev_info->rx_offload_capa = 174 DEV_RX_OFFLOAD_IPV4_CKSUM | 175 DEV_RX_OFFLOAD_UDP_CKSUM | 176 DEV_RX_OFFLOAD_TCP_CKSUM | 177 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 178 DEV_RX_OFFLOAD_VLAN_FILTER | 179 DEV_RX_OFFLOAD_VLAN_STRIP | 180 DEV_RX_OFFLOAD_JUMBO_FRAME | 181 DEV_RX_OFFLOAD_SCATTER; 182 dev_info->tx_offload_capa = 183 DEV_TX_OFFLOAD_IPV4_CKSUM | 184 DEV_TX_OFFLOAD_UDP_CKSUM | 185 DEV_TX_OFFLOAD_TCP_CKSUM | 186 DEV_TX_OFFLOAD_SCTP_CKSUM | 187 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 188 DEV_TX_OFFLOAD_VLAN_INSERT | 189 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 190 DEV_TX_OFFLOAD_MULTI_SEGS; 191 dev_info->speed_capa = ETH_LINK_SPEED_1G | 192 ETH_LINK_SPEED_2_5G | 193 ETH_LINK_SPEED_10G; 194 } 195 196 static int 197 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) 198 { 199 struct dpaa2_dev_priv *priv = dev->data->dev_private; 200 uint16_t dist_idx; 201 uint32_t vq_id; 202 struct dpaa2_queue *mc_q, *mcq; 203 uint32_t tot_queues; 204 int i; 205 struct dpaa2_queue *dpaa2_q; 206 207 PMD_INIT_FUNC_TRACE(); 208 209 tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; 210 mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, 211 RTE_CACHE_LINE_SIZE); 212 if (!mc_q) { 213 DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); 214 return -1; 215 } 216 217 for (i = 0; i < priv->nb_rx_queues; i++) { 218 mc_q->dev = dev; 219 priv->rx_vq[i] = mc_q++; 220 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 221 dpaa2_q->q_storage = rte_malloc("dq_storage", 222 sizeof(struct queue_storage_info_t), 223 RTE_CACHE_LINE_SIZE); 224 if (!dpaa2_q->q_storage) 225 goto fail; 226 227 memset(dpaa2_q->q_storage, 0, 228 sizeof(struct queue_storage_info_t)); 229 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) 230 goto fail; 231 } 232 233 for (i = 0; i < priv->nb_tx_queues; i++) { 234 mc_q->dev = dev; 235 mc_q->flow_id = 0xffff; 236 priv->tx_vq[i] = mc_q++; 237 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 238 dpaa2_q->cscn = rte_malloc(NULL, 239 sizeof(struct qbman_result), 16); 240 if (!dpaa2_q->cscn) 241 goto fail_tx; 242 } 243 244 vq_id = 0; 245 for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { 246 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; 247 mcq->tc_index = DPAA2_DEF_TC; 248 mcq->flow_id = dist_idx; 249 vq_id++; 250 } 251 252 return 
0; 253 fail_tx: 254 i -= 1; 255 while (i >= 0) { 256 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 257 rte_free(dpaa2_q->cscn); 258 priv->tx_vq[i--] = NULL; 259 } 260 i = priv->nb_rx_queues; 261 fail: 262 i -= 1; 263 mc_q = priv->rx_vq[0]; 264 while (i >= 0) { 265 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 266 dpaa2_free_dq_storage(dpaa2_q->q_storage); 267 rte_free(dpaa2_q->q_storage); 268 priv->rx_vq[i--] = NULL; 269 } 270 rte_free(mc_q); 271 return -1; 272 } 273 274 static int 275 dpaa2_eth_dev_configure(struct rte_eth_dev *dev) 276 { 277 struct dpaa2_dev_priv *priv = dev->data->dev_private; 278 struct fsl_mc_io *dpni = priv->hw; 279 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 280 struct rte_eth_dev_info dev_info; 281 uint64_t rx_offloads = eth_conf->rxmode.offloads; 282 uint64_t tx_offloads = eth_conf->txmode.offloads; 283 int rx_l3_csum_offload = false; 284 int rx_l4_csum_offload = false; 285 int tx_l3_csum_offload = false; 286 int tx_l4_csum_offload = false; 287 int ret; 288 289 PMD_INIT_FUNC_TRACE(); 290 291 dpaa2_dev_info_get(dev, &dev_info); 292 if ((~(dev_info.rx_offload_capa) & rx_offloads) != 0) { 293 DPAA2_PMD_ERR("Some Rx offloads are not supported " 294 "requested 0x%" PRIx64 " supported 0x%" PRIx64, 295 rx_offloads, dev_info.rx_offload_capa); 296 return -ENOTSUP; 297 } 298 299 if ((~(dev_info.tx_offload_capa) & tx_offloads) != 0) { 300 DPAA2_PMD_ERR("Some Tx offloads are not supported " 301 "requested 0x%" PRIx64 " supported 0x%" PRIx64, 302 tx_offloads, dev_info.tx_offload_capa); 303 return -ENOTSUP; 304 } 305 306 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 307 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { 308 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, 309 priv->token, eth_conf->rxmode.max_rx_pkt_len); 310 if (ret) { 311 DPAA2_PMD_ERR( 312 "Unable to set mtu. check config"); 313 return ret; 314 } 315 } else { 316 return -1; 317 } 318 } 319 320 if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { 321 ret = dpaa2_setup_flow_dist(dev, 322 eth_conf->rx_adv_conf.rss_conf.rss_hf); 323 if (ret) { 324 DPAA2_PMD_ERR("Unable to set flow distribution." 
325 "Check queue config"); 326 return ret; 327 } 328 } 329 330 if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) 331 rx_l3_csum_offload = true; 332 333 if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || 334 (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) 335 rx_l4_csum_offload = true; 336 337 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 338 DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); 339 if (ret) { 340 DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); 341 return ret; 342 } 343 344 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 345 DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); 346 if (ret) { 347 DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); 348 return ret; 349 } 350 351 if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) 352 tx_l3_csum_offload = true; 353 354 if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || 355 (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || 356 (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) 357 tx_l4_csum_offload = true; 358 359 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 360 DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); 361 if (ret) { 362 DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); 363 return ret; 364 } 365 366 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 367 DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); 368 if (ret) { 369 DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); 370 return ret; 371 } 372 373 /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in 374 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC] 375 * to 0 for LS2 in the hardware thus disabling data/annotation 376 * stashing. For LX2 this is fixed in hardware and thus hash result and 377 * parse results can be received in FD using this option. 378 */ 379 if (dpaa2_svr_family == SVR_LX2160A) { 380 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 381 DPNI_FLCTYPE_HASH, true); 382 if (ret) { 383 DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); 384 return ret; 385 } 386 } 387 388 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); 389 390 /* update the current status */ 391 dpaa2_dev_link_update(dev, 0); 392 393 return 0; 394 } 395 396 /* Function to setup RX flow information. It contains traffic class ID, 397 * flow ID, destination configuration etc. 398 */ 399 static int 400 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, 401 uint16_t rx_queue_id, 402 uint16_t nb_rx_desc __rte_unused, 403 unsigned int socket_id __rte_unused, 404 const struct rte_eth_rxconf *rx_conf __rte_unused, 405 struct rte_mempool *mb_pool) 406 { 407 struct dpaa2_dev_priv *priv = dev->data->dev_private; 408 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 409 struct dpaa2_queue *dpaa2_q; 410 struct dpni_queue cfg; 411 uint8_t options = 0; 412 uint8_t flow_id; 413 uint32_t bpid; 414 int ret; 415 416 PMD_INIT_FUNC_TRACE(); 417 418 DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", 419 dev, rx_queue_id, mb_pool, rx_conf); 420 421 if (!priv->bp_list || priv->bp_list->mp != mb_pool) { 422 bpid = mempool_to_bpid(mb_pool); 423 ret = dpaa2_attach_bp_list(priv, 424 rte_dpaa2_bpid_info[bpid].bp_list); 425 if (ret) 426 return ret; 427 } 428 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 429 dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. 
*/ 430 431 /*Get the flow id from given VQ id*/ 432 flow_id = rx_queue_id % priv->nb_rx_queues; 433 memset(&cfg, 0, sizeof(struct dpni_queue)); 434 435 options = options | DPNI_QUEUE_OPT_USER_CTX; 436 cfg.user_context = (size_t)(dpaa2_q); 437 438 /*if ls2088 or rev2 device, enable the stashing */ 439 440 if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { 441 options |= DPNI_QUEUE_OPT_FLC; 442 cfg.flc.stash_control = true; 443 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; 444 /* 00 00 00 - last 6 bit represent annotation, context stashing, 445 * data stashing setting 01 01 00 (0x14) 446 * (in following order ->DS AS CS) 447 * to enable 1 line data, 1 line annotation. 448 * For LX2, this setting should be 01 00 00 (0x10) 449 */ 450 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) 451 cfg.flc.value |= 0x10; 452 else 453 cfg.flc.value |= 0x14; 454 } 455 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, 456 dpaa2_q->tc_index, flow_id, options, &cfg); 457 if (ret) { 458 DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); 459 return -1; 460 } 461 462 if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { 463 struct dpni_taildrop taildrop; 464 465 taildrop.enable = 1; 466 /*enabling per rx queue congestion control */ 467 taildrop.threshold = CONG_THRESHOLD_RX_Q; 468 taildrop.units = DPNI_CONGESTION_UNIT_BYTES; 469 taildrop.oal = CONG_RX_OAL; 470 DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", 471 rx_queue_id); 472 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 473 DPNI_CP_QUEUE, DPNI_QUEUE_RX, 474 dpaa2_q->tc_index, flow_id, &taildrop); 475 if (ret) { 476 DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", 477 ret); 478 return -1; 479 } 480 } 481 482 dev->data->rx_queues[rx_queue_id] = dpaa2_q; 483 return 0; 484 } 485 486 static int 487 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 488 uint16_t tx_queue_id, 489 uint16_t nb_tx_desc __rte_unused, 490 unsigned int socket_id __rte_unused, 491 const struct rte_eth_txconf *tx_conf __rte_unused) 492 { 493 struct dpaa2_dev_priv *priv = dev->data->dev_private; 494 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 495 priv->tx_vq[tx_queue_id]; 496 struct fsl_mc_io *dpni = priv->hw; 497 struct dpni_queue tx_conf_cfg; 498 struct dpni_queue tx_flow_cfg; 499 uint8_t options = 0, flow_id; 500 uint32_t tc_id; 501 int ret; 502 503 PMD_INIT_FUNC_TRACE(); 504 505 /* Return if queue already configured */ 506 if (dpaa2_q->flow_id != 0xffff) { 507 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 508 return 0; 509 } 510 511 memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 512 memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 513 514 tc_id = tx_queue_id; 515 flow_id = 0; 516 517 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, 518 tc_id, flow_id, options, &tx_flow_cfg); 519 if (ret) { 520 DPAA2_PMD_ERR("Error in setting the tx flow: " 521 "tc_id=%d, flow=%d err=%d", 522 tc_id, flow_id, ret); 523 return -1; 524 } 525 526 dpaa2_q->flow_id = flow_id; 527 528 if (tx_queue_id == 0) { 529 /*Set tx-conf and error configuration*/ 530 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 531 priv->token, 532 DPNI_CONF_DISABLE); 533 if (ret) { 534 DPAA2_PMD_ERR("Error in set tx conf mode settings: " 535 "err=%d", ret); 536 return -1; 537 } 538 } 539 dpaa2_q->tc_index = tc_id; 540 541 if (!(priv->flags & DPAA2_TX_CGR_OFF)) { 542 struct dpni_congestion_notification_cfg cong_notif_cfg; 543 544 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; 545 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; 546 /* Notify that the 
		 * queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more packet types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
644 * 645 * @return 646 * void 647 */ 648 static void 649 dpaa2_interrupt_handler(void *param) 650 { 651 struct rte_eth_dev *dev = param; 652 struct dpaa2_dev_priv *priv = dev->data->dev_private; 653 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 654 int ret; 655 int irq_index = DPNI_IRQ_INDEX; 656 unsigned int status = 0, clear = 0; 657 658 PMD_INIT_FUNC_TRACE(); 659 660 if (dpni == NULL) { 661 DPAA2_PMD_ERR("dpni is NULL"); 662 return; 663 } 664 665 ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 666 irq_index, &status); 667 if (unlikely(ret)) { 668 DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); 669 clear = 0xffffffff; 670 goto out; 671 } 672 673 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 674 clear = DPNI_IRQ_EVENT_LINK_CHANGED; 675 dpaa2_dev_link_update(dev, 0); 676 /* calling all the apps registered for link status event */ 677 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 678 NULL); 679 } 680 out: 681 ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 682 irq_index, clear); 683 if (unlikely(ret)) 684 DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); 685 } 686 687 static int 688 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 689 { 690 int err = 0; 691 struct dpaa2_dev_priv *priv = dev->data->dev_private; 692 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 693 int irq_index = DPNI_IRQ_INDEX; 694 unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 695 696 PMD_INIT_FUNC_TRACE(); 697 698 err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 699 irq_index, mask); 700 if (err < 0) { 701 DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, 702 strerror(-err)); 703 return err; 704 } 705 706 err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 707 irq_index, enable); 708 if (err < 0) 709 DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, 710 strerror(-err)); 711 712 return err; 713 } 714 715 static int 716 dpaa2_dev_start(struct rte_eth_dev *dev) 717 { 718 struct rte_device *rdev = dev->device; 719 struct rte_dpaa2_device *dpaa2_dev; 720 struct rte_eth_dev_data *data = dev->data; 721 struct dpaa2_dev_priv *priv = data->dev_private; 722 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 723 struct dpni_queue cfg; 724 struct dpni_error_cfg err_cfg; 725 uint16_t qdid; 726 struct dpni_queue_id qid; 727 struct dpaa2_queue *dpaa2_q; 728 int ret, i; 729 struct rte_intr_handle *intr_handle; 730 731 dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 732 intr_handle = &dpaa2_dev->intr_handle; 733 734 PMD_INIT_FUNC_TRACE(); 735 736 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 737 if (ret) { 738 DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", 739 priv->hw_id, ret); 740 return ret; 741 } 742 743 /* Power up the phy. 
	   Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		/* free only if the CSCN area was actually allocated */
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); 870 return; 871 } 872 873 memset(&link, 0, sizeof(link)); 874 rte_eth_linkstatus_set(dev, &link); 875 } 876 877 static void 878 dpaa2_dev_promiscuous_enable( 879 struct rte_eth_dev *dev) 880 { 881 int ret; 882 struct dpaa2_dev_priv *priv = dev->data->dev_private; 883 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 884 885 PMD_INIT_FUNC_TRACE(); 886 887 if (dpni == NULL) { 888 DPAA2_PMD_ERR("dpni is NULL"); 889 return; 890 } 891 892 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 893 if (ret < 0) 894 DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); 895 896 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 897 if (ret < 0) 898 DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); 899 } 900 901 static void 902 dpaa2_dev_promiscuous_disable( 903 struct rte_eth_dev *dev) 904 { 905 int ret; 906 struct dpaa2_dev_priv *priv = dev->data->dev_private; 907 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 908 909 PMD_INIT_FUNC_TRACE(); 910 911 if (dpni == NULL) { 912 DPAA2_PMD_ERR("dpni is NULL"); 913 return; 914 } 915 916 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 917 if (ret < 0) 918 DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); 919 920 if (dev->data->all_multicast == 0) { 921 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, 922 priv->token, false); 923 if (ret < 0) 924 DPAA2_PMD_ERR("Unable to disable M promisc mode %d", 925 ret); 926 } 927 } 928 929 static void 930 dpaa2_dev_allmulticast_enable( 931 struct rte_eth_dev *dev) 932 { 933 int ret; 934 struct dpaa2_dev_priv *priv = dev->data->dev_private; 935 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 936 937 PMD_INIT_FUNC_TRACE(); 938 939 if (dpni == NULL) { 940 DPAA2_PMD_ERR("dpni is NULL"); 941 return; 942 } 943 944 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 945 if (ret < 0) 946 DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); 947 } 948 949 static void 950 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 951 { 952 int ret; 953 struct dpaa2_dev_priv *priv = dev->data->dev_private; 954 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 955 956 PMD_INIT_FUNC_TRACE(); 957 958 if (dpni == NULL) { 959 DPAA2_PMD_ERR("dpni is NULL"); 960 return; 961 } 962 963 /* must remain on for all promiscuous */ 964 if (dev->data->promiscuous == 1) 965 return; 966 967 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 968 if (ret < 0) 969 DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); 970 } 971 972 static int 973 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 974 { 975 int ret; 976 struct dpaa2_dev_priv *priv = dev->data->dev_private; 977 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 978 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN 979 + VLAN_TAG_SIZE; 980 981 PMD_INIT_FUNC_TRACE(); 982 983 if (dpni == NULL) { 984 DPAA2_PMD_ERR("dpni is NULL"); 985 return -EINVAL; 986 } 987 988 /* check that mtu is within the allowed range */ 989 if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) 990 return -EINVAL; 991 992 if (frame_size > ETHER_MAX_LEN) 993 dev->data->dev_conf.rxmode.offloads &= 994 DEV_RX_OFFLOAD_JUMBO_FRAME; 995 else 996 dev->data->dev_conf.rxmode.offloads &= 997 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 998 999 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 1000 1001 /* Set the Max Rx frame length as 'mtu' + 1002 * Maximum Ethernet header length 1003 */ 1004 
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	/* propagate the failure so that ethdev does not keep a stale entry */
	return ret;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
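	/* The ethdev error/miss counters below are derived from the DPNI
	 * page_2 discard counters: filtered plus discarded ingress frames
	 * feed ierrors, while no-buffer discards are reported as imissed.
	 */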
1139 stats->ierrors = value.page_2.ingress_filtered_frames; 1140 /* Ingress drop frame count due to error */ 1141 stats->ierrors += value.page_2.ingress_discarded_frames; 1142 1143 stats->oerrors = value.page_2.egress_discarded_frames; 1144 stats->imissed = value.page_2.ingress_nobuffer_discards; 1145 1146 return 0; 1147 1148 err: 1149 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1150 return retcode; 1151 }; 1152 1153 static int 1154 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1155 unsigned int n) 1156 { 1157 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1158 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1159 int32_t retcode; 1160 union dpni_statistics value[3] = {}; 1161 unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 1162 1163 if (n < num) 1164 return num; 1165 1166 if (xstats == NULL) 1167 return 0; 1168 1169 /* Get Counters from page_0*/ 1170 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1171 0, 0, &value[0]); 1172 if (retcode) 1173 goto err; 1174 1175 /* Get Counters from page_1*/ 1176 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1177 1, 0, &value[1]); 1178 if (retcode) 1179 goto err; 1180 1181 /* Get Counters from page_2*/ 1182 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1183 2, 0, &value[2]); 1184 if (retcode) 1185 goto err; 1186 1187 for (i = 0; i < num; i++) { 1188 xstats[i].id = i; 1189 xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 1190 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1191 } 1192 return i; 1193 err: 1194 DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); 1195 return retcode; 1196 } 1197 1198 static int 1199 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1200 struct rte_eth_xstat_name *xstats_names, 1201 unsigned int limit) 1202 { 1203 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1204 1205 if (limit < stat_cnt) 1206 return stat_cnt; 1207 1208 if (xstats_names != NULL) 1209 for (i = 0; i < stat_cnt; i++) 1210 snprintf(xstats_names[i].name, 1211 sizeof(xstats_names[i].name), 1212 "%s", 1213 dpaa2_xstats_strings[i].name); 1214 1215 return stat_cnt; 1216 } 1217 1218 static int 1219 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1220 uint64_t *values, unsigned int n) 1221 { 1222 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1223 uint64_t values_copy[stat_cnt]; 1224 1225 if (!ids) { 1226 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1227 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1228 int32_t retcode; 1229 union dpni_statistics value[3] = {}; 1230 1231 if (n < stat_cnt) 1232 return stat_cnt; 1233 1234 if (!values) 1235 return 0; 1236 1237 /* Get Counters from page_0*/ 1238 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1239 0, 0, &value[0]); 1240 if (retcode) 1241 return 0; 1242 1243 /* Get Counters from page_1*/ 1244 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1245 1, 0, &value[1]); 1246 if (retcode) 1247 return 0; 1248 1249 /* Get Counters from page_2*/ 1250 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1251 2, 0, &value[2]); 1252 if (retcode) 1253 return 0; 1254 1255 for (i = 0; i < stat_cnt; i++) { 1256 values[i] = value[dpaa2_xstats_strings[i].page_id]. 
1257 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1258 } 1259 return stat_cnt; 1260 } 1261 1262 dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 1263 1264 for (i = 0; i < n; i++) { 1265 if (ids[i] >= stat_cnt) { 1266 DPAA2_PMD_ERR("xstats id value isn't valid"); 1267 return -1; 1268 } 1269 values[i] = values_copy[ids[i]]; 1270 } 1271 return n; 1272 } 1273 1274 static int 1275 dpaa2_xstats_get_names_by_id( 1276 struct rte_eth_dev *dev, 1277 struct rte_eth_xstat_name *xstats_names, 1278 const uint64_t *ids, 1279 unsigned int limit) 1280 { 1281 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1282 struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 1283 1284 if (!ids) 1285 return dpaa2_xstats_get_names(dev, xstats_names, limit); 1286 1287 dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 1288 1289 for (i = 0; i < limit; i++) { 1290 if (ids[i] >= stat_cnt) { 1291 DPAA2_PMD_ERR("xstats id value isn't valid"); 1292 return -1; 1293 } 1294 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 1295 } 1296 return limit; 1297 } 1298 1299 static void 1300 dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1301 { 1302 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1303 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1304 int32_t retcode; 1305 1306 PMD_INIT_FUNC_TRACE(); 1307 1308 if (dpni == NULL) { 1309 DPAA2_PMD_ERR("dpni is NULL"); 1310 return; 1311 } 1312 1313 retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1314 if (retcode) 1315 goto error; 1316 1317 return; 1318 1319 error: 1320 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1321 return; 1322 }; 1323 1324 /* return 0 means link status changed, -1 means not changed */ 1325 static int 1326 dpaa2_dev_link_update(struct rte_eth_dev *dev, 1327 int wait_to_complete __rte_unused) 1328 { 1329 int ret; 1330 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1331 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1332 struct rte_eth_link link; 1333 struct dpni_link_state state = {0}; 1334 1335 if (dpni == NULL) { 1336 DPAA2_PMD_ERR("dpni is NULL"); 1337 return 0; 1338 } 1339 1340 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1341 if (ret < 0) { 1342 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 1343 return -1; 1344 } 1345 1346 memset(&link, 0, sizeof(struct rte_eth_link)); 1347 link.link_status = state.up; 1348 link.link_speed = state.rate; 1349 1350 if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1351 link.link_duplex = ETH_LINK_HALF_DUPLEX; 1352 else 1353 link.link_duplex = ETH_LINK_FULL_DUPLEX; 1354 1355 ret = rte_eth_linkstatus_set(dev, &link); 1356 if (ret == -1) 1357 DPAA2_PMD_DEBUG("No change in status"); 1358 else 1359 DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, 1360 link.link_status ? "Up" : "Down"); 1361 1362 return ret; 1363 } 1364 1365 /** 1366 * Toggle the DPNI to enable, if not already enabled. 1367 * This is not strictly PHY up/down - it is more of logical toggling. 
1368 */ 1369 static int 1370 dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1371 { 1372 int ret = -EINVAL; 1373 struct dpaa2_dev_priv *priv; 1374 struct fsl_mc_io *dpni; 1375 int en = 0; 1376 struct dpni_link_state state = {0}; 1377 1378 priv = dev->data->dev_private; 1379 dpni = (struct fsl_mc_io *)priv->hw; 1380 1381 if (dpni == NULL) { 1382 DPAA2_PMD_ERR("dpni is NULL"); 1383 return ret; 1384 } 1385 1386 /* Check if DPNI is currently enabled */ 1387 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1388 if (ret) { 1389 /* Unable to obtain dpni status; Not continuing */ 1390 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1391 return -EINVAL; 1392 } 1393 1394 /* Enable link if not already enabled */ 1395 if (!en) { 1396 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1397 if (ret) { 1398 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1399 return -EINVAL; 1400 } 1401 } 1402 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1403 if (ret < 0) { 1404 DPAA2_PMD_ERR("Unable to get link state (%d)", ret); 1405 return -1; 1406 } 1407 1408 /* changing tx burst function to start enqueues */ 1409 dev->tx_pkt_burst = dpaa2_dev_tx; 1410 dev->data->dev_link.link_status = state.up; 1411 1412 if (state.up) 1413 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); 1414 else 1415 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); 1416 return ret; 1417 } 1418 1419 /** 1420 * Toggle the DPNI to disable, if not already disabled. 1421 * This is not strictly PHY up/down - it is more of logical toggling. 1422 */ 1423 static int 1424 dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 1425 { 1426 int ret = -EINVAL; 1427 struct dpaa2_dev_priv *priv; 1428 struct fsl_mc_io *dpni; 1429 int dpni_enabled = 0; 1430 int retries = 10; 1431 1432 PMD_INIT_FUNC_TRACE(); 1433 1434 priv = dev->data->dev_private; 1435 dpni = (struct fsl_mc_io *)priv->hw; 1436 1437 if (dpni == NULL) { 1438 DPAA2_PMD_ERR("Device has not yet been configured"); 1439 return ret; 1440 } 1441 1442 /*changing tx burst function to avoid any more enqueues */ 1443 dev->tx_pkt_burst = dummy_dev_tx; 1444 1445 /* Loop while dpni_disable() attempts to drain the egress FQs 1446 * and confirm them back to us. 1447 */ 1448 do { 1449 ret = dpni_disable(dpni, 0, priv->token); 1450 if (ret) { 1451 DPAA2_PMD_ERR("dpni disable failed (%d)", ret); 1452 return ret; 1453 } 1454 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 1455 if (ret) { 1456 DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); 1457 return ret; 1458 } 1459 if (dpni_enabled) 1460 /* Allow the MC some slack */ 1461 rte_delay_us(100 * 1000); 1462 } while (dpni_enabled && --retries); 1463 1464 if (!retries) { 1465 DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); 1466 /* todo- we may have to manually cleanup queues. 
1467 */ 1468 } else { 1469 DPAA2_PMD_INFO("Port %d Link DOWN successful", 1470 dev->data->port_id); 1471 } 1472 1473 dev->data->dev_link.link_status = 0; 1474 1475 return ret; 1476 } 1477 1478 static int 1479 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1480 { 1481 int ret = -EINVAL; 1482 struct dpaa2_dev_priv *priv; 1483 struct fsl_mc_io *dpni; 1484 struct dpni_link_state state = {0}; 1485 1486 PMD_INIT_FUNC_TRACE(); 1487 1488 priv = dev->data->dev_private; 1489 dpni = (struct fsl_mc_io *)priv->hw; 1490 1491 if (dpni == NULL || fc_conf == NULL) { 1492 DPAA2_PMD_ERR("device not configured"); 1493 return ret; 1494 } 1495 1496 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1497 if (ret) { 1498 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 1499 return ret; 1500 } 1501 1502 memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 1503 if (state.options & DPNI_LINK_OPT_PAUSE) { 1504 /* DPNI_LINK_OPT_PAUSE set 1505 * if ASYM_PAUSE not set, 1506 * RX Side flow control (handle received Pause frame) 1507 * TX side flow control (send Pause frame) 1508 * if ASYM_PAUSE set, 1509 * RX Side flow control (handle received Pause frame) 1510 * No TX side flow control (send Pause frame disabled) 1511 */ 1512 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 1513 fc_conf->mode = RTE_FC_FULL; 1514 else 1515 fc_conf->mode = RTE_FC_RX_PAUSE; 1516 } else { 1517 /* DPNI_LINK_OPT_PAUSE not set 1518 * if ASYM_PAUSE set, 1519 * TX side flow control (send Pause frame) 1520 * No RX side flow control (No action on pause frame rx) 1521 * if ASYM_PAUSE not set, 1522 * Flow control disabled 1523 */ 1524 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 1525 fc_conf->mode = RTE_FC_TX_PAUSE; 1526 else 1527 fc_conf->mode = RTE_FC_NONE; 1528 } 1529 1530 return ret; 1531 } 1532 1533 static int 1534 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1535 { 1536 int ret = -EINVAL; 1537 struct dpaa2_dev_priv *priv; 1538 struct fsl_mc_io *dpni; 1539 struct dpni_link_state state = {0}; 1540 struct dpni_link_cfg cfg = {0}; 1541 1542 PMD_INIT_FUNC_TRACE(); 1543 1544 priv = dev->data->dev_private; 1545 dpni = (struct fsl_mc_io *)priv->hw; 1546 1547 if (dpni == NULL) { 1548 DPAA2_PMD_ERR("dpni is NULL"); 1549 return ret; 1550 } 1551 1552 /* It is necessary to obtain the current state before setting fc_conf 1553 * as MC would return error in case rate, autoneg or duplex values are 1554 * different. 
1555 */ 1556 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1557 if (ret) { 1558 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); 1559 return -1; 1560 } 1561 1562 /* Disable link before setting configuration */ 1563 dpaa2_dev_set_link_down(dev); 1564 1565 /* Based on fc_conf, update cfg */ 1566 cfg.rate = state.rate; 1567 cfg.options = state.options; 1568 1569 /* update cfg with fc_conf */ 1570 switch (fc_conf->mode) { 1571 case RTE_FC_FULL: 1572 /* Full flow control; 1573 * OPT_PAUSE set, ASYM_PAUSE not set 1574 */ 1575 cfg.options |= DPNI_LINK_OPT_PAUSE; 1576 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1577 break; 1578 case RTE_FC_TX_PAUSE: 1579 /* Enable RX flow control 1580 * OPT_PAUSE not set; 1581 * ASYM_PAUSE set; 1582 */ 1583 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1584 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1585 break; 1586 case RTE_FC_RX_PAUSE: 1587 /* Enable TX Flow control 1588 * OPT_PAUSE set 1589 * ASYM_PAUSE set 1590 */ 1591 cfg.options |= DPNI_LINK_OPT_PAUSE; 1592 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1593 break; 1594 case RTE_FC_NONE: 1595 /* Disable Flow control 1596 * OPT_PAUSE not set 1597 * ASYM_PAUSE not set 1598 */ 1599 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1600 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1601 break; 1602 default: 1603 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", 1604 fc_conf->mode); 1605 return -1; 1606 } 1607 1608 ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 1609 if (ret) 1610 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", 1611 ret); 1612 1613 /* Enable link */ 1614 dpaa2_dev_set_link_up(dev); 1615 1616 return ret; 1617 } 1618 1619 static int 1620 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, 1621 struct rte_eth_rss_conf *rss_conf) 1622 { 1623 struct rte_eth_dev_data *data = dev->data; 1624 struct rte_eth_conf *eth_conf = &data->dev_conf; 1625 int ret; 1626 1627 PMD_INIT_FUNC_TRACE(); 1628 1629 if (rss_conf->rss_hf) { 1630 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); 1631 if (ret) { 1632 DPAA2_PMD_ERR("Unable to set flow dist"); 1633 return ret; 1634 } 1635 } else { 1636 ret = dpaa2_remove_flow_dist(dev, 0); 1637 if (ret) { 1638 DPAA2_PMD_ERR("Unable to remove flow dist"); 1639 return ret; 1640 } 1641 } 1642 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 1643 return 0; 1644 } 1645 1646 static int 1647 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1648 struct rte_eth_rss_conf *rss_conf) 1649 { 1650 struct rte_eth_dev_data *data = dev->data; 1651 struct rte_eth_conf *eth_conf = &data->dev_conf; 1652 1653 /* dpaa2 does not support rss_key, so length should be 0*/ 1654 rss_conf->rss_key_len = 0; 1655 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 1656 return 0; 1657 } 1658 1659 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, 1660 int eth_rx_queue_id, 1661 uint16_t dpcon_id, 1662 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 1663 { 1664 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 1665 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; 1666 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 1667 uint8_t flow_id = dpaa2_ethq->flow_id; 1668 struct dpni_queue cfg; 1669 uint8_t options; 1670 int ret; 1671 1672 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 1673 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; 1674 else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) 1675 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; 1676 else 1677 return -EINVAL; 1678 1679 memset(&cfg, 0, 
sizeof(struct dpni_queue)); 1680 options = DPNI_QUEUE_OPT_DEST; 1681 cfg.destination.type = DPNI_DEST_DPCON; 1682 cfg.destination.id = dpcon_id; 1683 cfg.destination.priority = queue_conf->ev.priority; 1684 1685 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { 1686 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; 1687 cfg.destination.hold_active = 1; 1688 } 1689 1690 options |= DPNI_QUEUE_OPT_USER_CTX; 1691 cfg.user_context = (size_t)(dpaa2_ethq); 1692 1693 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1694 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1695 if (ret) { 1696 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 1697 return ret; 1698 } 1699 1700 memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 1701 1702 return 0; 1703 } 1704 1705 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 1706 int eth_rx_queue_id) 1707 { 1708 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 1709 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; 1710 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 1711 uint8_t flow_id = dpaa2_ethq->flow_id; 1712 struct dpni_queue cfg; 1713 uint8_t options; 1714 int ret; 1715 1716 memset(&cfg, 0, sizeof(struct dpni_queue)); 1717 options = DPNI_QUEUE_OPT_DEST; 1718 cfg.destination.type = DPNI_DEST_NONE; 1719 1720 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1721 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1722 if (ret) 1723 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 1724 1725 return ret; 1726 } 1727 1728 static struct eth_dev_ops dpaa2_ethdev_ops = { 1729 .dev_configure = dpaa2_eth_dev_configure, 1730 .dev_start = dpaa2_dev_start, 1731 .dev_stop = dpaa2_dev_stop, 1732 .dev_close = dpaa2_dev_close, 1733 .promiscuous_enable = dpaa2_dev_promiscuous_enable, 1734 .promiscuous_disable = dpaa2_dev_promiscuous_disable, 1735 .allmulticast_enable = dpaa2_dev_allmulticast_enable, 1736 .allmulticast_disable = dpaa2_dev_allmulticast_disable, 1737 .dev_set_link_up = dpaa2_dev_set_link_up, 1738 .dev_set_link_down = dpaa2_dev_set_link_down, 1739 .link_update = dpaa2_dev_link_update, 1740 .stats_get = dpaa2_dev_stats_get, 1741 .xstats_get = dpaa2_dev_xstats_get, 1742 .xstats_get_by_id = dpaa2_xstats_get_by_id, 1743 .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 1744 .xstats_get_names = dpaa2_xstats_get_names, 1745 .stats_reset = dpaa2_dev_stats_reset, 1746 .xstats_reset = dpaa2_dev_stats_reset, 1747 .fw_version_get = dpaa2_fw_version_get, 1748 .dev_infos_get = dpaa2_dev_info_get, 1749 .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 1750 .mtu_set = dpaa2_dev_mtu_set, 1751 .vlan_filter_set = dpaa2_vlan_filter_set, 1752 .vlan_offload_set = dpaa2_vlan_offload_set, 1753 .rx_queue_setup = dpaa2_dev_rx_queue_setup, 1754 .rx_queue_release = dpaa2_dev_rx_queue_release, 1755 .tx_queue_setup = dpaa2_dev_tx_queue_setup, 1756 .tx_queue_release = dpaa2_dev_tx_queue_release, 1757 .rx_queue_count = dpaa2_dev_rx_queue_count, 1758 .flow_ctrl_get = dpaa2_flow_ctrl_get, 1759 .flow_ctrl_set = dpaa2_flow_ctrl_set, 1760 .mac_addr_add = dpaa2_dev_add_mac_addr, 1761 .mac_addr_remove = dpaa2_dev_remove_mac_addr, 1762 .mac_addr_set = dpaa2_dev_set_mac_addr, 1763 .rss_hash_update = dpaa2_dev_rss_hash_update, 1764 .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 1765 }; 1766 1767 static int 1768 dpaa2_dev_init(struct rte_eth_dev *eth_dev) 1769 { 1770 struct rte_device *dev = eth_dev->device; 1771 struct rte_dpaa2_device *dpaa2_dev; 1772 struct fsl_mc_io *dpni_dev; 
1773 struct dpni_attr attr; 1774 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1775 struct dpni_buffer_layout layout; 1776 int ret, hw_id; 1777 1778 PMD_INIT_FUNC_TRACE(); 1779 1780 /* For secondary processes, the primary has done all the work */ 1781 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1782 return 0; 1783 1784 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 1785 1786 hw_id = dpaa2_dev->object_id; 1787 1788 dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 1789 if (!dpni_dev) { 1790 DPAA2_PMD_ERR("Memory allocation failed for dpni device"); 1791 return -1; 1792 } 1793 1794 dpni_dev->regs = rte_mcp_ptr_list[0]; 1795 ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 1796 if (ret) { 1797 DPAA2_PMD_ERR( 1798 "Failure in opening dpni@%d with err code %d", 1799 hw_id, ret); 1800 rte_free(dpni_dev); 1801 return -1; 1802 } 1803 1804 /* Clean the device first */ 1805 ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 1806 if (ret) { 1807 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", 1808 hw_id, ret); 1809 goto init_err; 1810 } 1811 1812 ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 1813 if (ret) { 1814 DPAA2_PMD_ERR( 1815 "Failure in get dpni@%d attribute, err code %d", 1816 hw_id, ret); 1817 goto init_err; 1818 } 1819 1820 priv->num_rx_tc = attr.num_rx_tcs; 1821 1822 /* Resetting the "num_rx_queues" to equal number of queues in first TC 1823 * as only one TC is supported on Rx Side. Once Multiple TCs will be 1824 * in use for Rx processing then this will be changed or removed. 1825 */ 1826 priv->nb_rx_queues = attr.num_queues; 1827 1828 /* Using number of TX queues as number of TX TCs */ 1829 priv->nb_tx_queues = attr.num_tx_tcs; 1830 1831 DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", 1832 priv->num_rx_tc, priv->nb_rx_queues, 1833 priv->nb_tx_queues); 1834 1835 priv->hw = dpni_dev; 1836 priv->hw_id = hw_id; 1837 priv->options = attr.options; 1838 priv->max_mac_filters = attr.mac_filter_entries; 1839 priv->max_vlan_filters = attr.vlan_filter_entries; 1840 priv->flags = 0; 1841 1842 /* Allocate memory for hardware structure for queues */ 1843 ret = dpaa2_alloc_rx_tx_queues(eth_dev); 1844 if (ret) { 1845 DPAA2_PMD_ERR("Queue allocation Failed"); 1846 goto init_err; 1847 } 1848 1849 /* Allocate memory for storing MAC addresses */ 1850 eth_dev->data->mac_addrs = rte_zmalloc("dpni", 1851 ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 1852 if (eth_dev->data->mac_addrs == NULL) { 1853 DPAA2_PMD_ERR( 1854 "Failed to allocate %d bytes needed to store MAC addresses", 1855 ETHER_ADDR_LEN * attr.mac_filter_entries); 1856 ret = -ENOMEM; 1857 goto init_err; 1858 } 1859 1860 ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 1861 priv->token, 1862 (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); 1863 if (ret) { 1864 DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d", 1865 ret); 1866 goto init_err; 1867 } 1868 1869 /* ... tx buffer layout ... */ 1870 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 1871 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 1872 layout.pass_frame_status = 1; 1873 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 1874 DPNI_QUEUE_TX, &layout); 1875 if (ret) { 1876 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); 1877 goto init_err; 1878 } 1879 1880 /* ... tx-conf and error buffer layout ... 
*/ 1881 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 1882 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 1883 layout.pass_frame_status = 1; 1884 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 1885 DPNI_QUEUE_TX_CONFIRM, &layout); 1886 if (ret) { 1887 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", 1888 ret); 1889 goto init_err; 1890 } 1891 1892 eth_dev->dev_ops = &dpaa2_ethdev_ops; 1893 1894 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 1895 eth_dev->tx_pkt_burst = dpaa2_dev_tx; 1896 1897 DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name); 1898 return 0; 1899 init_err: 1900 dpaa2_dev_uninit(eth_dev); 1901 return ret; 1902 } 1903 1904 static int 1905 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) 1906 { 1907 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1908 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1909 int i, ret; 1910 struct dpaa2_queue *dpaa2_q; 1911 1912 PMD_INIT_FUNC_TRACE(); 1913 1914 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1915 return 0; 1916 1917 if (!dpni) { 1918 DPAA2_PMD_WARN("Already closed or not started"); 1919 return -1; 1920 } 1921 1922 dpaa2_dev_close(eth_dev); 1923 1924 if (priv->rx_vq[0]) { 1925 /* cleaning up queue storage */ 1926 for (i = 0; i < priv->nb_rx_queues; i++) { 1927 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1928 if (dpaa2_q->q_storage) 1929 rte_free(dpaa2_q->q_storage); 1930 } 1931 /*free the all queue memory */ 1932 rte_free(priv->rx_vq[0]); 1933 priv->rx_vq[0] = NULL; 1934 } 1935 1936 /* free memory for storing MAC addresses */ 1937 if (eth_dev->data->mac_addrs) { 1938 rte_free(eth_dev->data->mac_addrs); 1939 eth_dev->data->mac_addrs = NULL; 1940 } 1941 1942 /* Close the device at underlying layer*/ 1943 ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 1944 if (ret) { 1945 DPAA2_PMD_ERR( 1946 "Failure closing dpni device with err code %d", 1947 ret); 1948 } 1949 1950 /* Free the allocated memory for ethernet private data and dpni*/ 1951 priv->hw = NULL; 1952 rte_free(dpni); 1953 1954 eth_dev->dev_ops = NULL; 1955 eth_dev->rx_pkt_burst = NULL; 1956 eth_dev->tx_pkt_burst = NULL; 1957 1958 DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); 1959 return 0; 1960 } 1961 1962 static int 1963 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 1964 struct rte_dpaa2_device *dpaa2_dev) 1965 { 1966 struct rte_eth_dev *eth_dev; 1967 int diag; 1968 1969 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1970 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); 1971 if (!eth_dev) 1972 return -ENODEV; 1973 eth_dev->data->dev_private = rte_zmalloc( 1974 "ethdev private structure", 1975 sizeof(struct dpaa2_dev_priv), 1976 RTE_CACHE_LINE_SIZE); 1977 if (eth_dev->data->dev_private == NULL) { 1978 DPAA2_PMD_CRIT( 1979 "Unable to allocate memory for private data"); 1980 rte_eth_dev_release_port(eth_dev); 1981 return -ENOMEM; 1982 } 1983 } else { 1984 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); 1985 if (!eth_dev) 1986 return -ENODEV; 1987 } 1988 1989 eth_dev->device = &dpaa2_dev->device; 1990 eth_dev->device->driver = &dpaa2_drv->driver; 1991 1992 dpaa2_dev->eth_dev = eth_dev; 1993 eth_dev->data->rx_mbuf_alloc_failed = 0; 1994 1995 if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) 1996 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 1997 1998 /* Invoke PMD device initialization function */ 1999 diag = dpaa2_dev_init(eth_dev); 2000 if (diag == 0) 2001 return 0; 2002 2003 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2004 
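		/* dev_private was allocated by the primary process above,
		 * so only the primary frees it on this error path.
		 */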
rte_free(eth_dev->data->dev_private); 2005 rte_eth_dev_release_port(eth_dev); 2006 return diag; 2007 } 2008 2009 static int 2010 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 2011 { 2012 struct rte_eth_dev *eth_dev; 2013 2014 eth_dev = dpaa2_dev->eth_dev; 2015 dpaa2_dev_uninit(eth_dev); 2016 2017 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2018 rte_free(eth_dev->data->dev_private); 2019 rte_eth_dev_release_port(eth_dev); 2020 2021 return 0; 2022 } 2023 2024 static struct rte_dpaa2_driver rte_dpaa2_pmd = { 2025 .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 2026 .drv_type = DPAA2_ETH, 2027 .probe = rte_dpaa2_probe, 2028 .remove = rte_dpaa2_remove, 2029 }; 2030 2031 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); 2032 2033 RTE_INIT(dpaa2_pmd_init_log); 2034 static void 2035 dpaa2_pmd_init_log(void) 2036 { 2037 dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); 2038 if (dpaa2_logtype_pmd >= 0) 2039 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); 2040 } 2041