/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *  - Pointer to the structure rte_eth_dev to read from.
 *  - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *  - On success, zero.
 *  - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

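/* Like the read helper above, the write helper below updates the link status
 * as a single 64-bit word via rte_atomic64_cmpset(), so concurrent readers
 * (e.g. the link-status interrupt handler) never observe a half-written
 * link state.
 */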
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *  - Pointer to the structure rte_eth_dev to read from.
 *  - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *  - On success, zero.
 *  - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
			    ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			RTE_LOG(INFO, PMD, "VLAN filter not available\n");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
				ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			RTE_LOG(INFO, PMD,
				"VLAN extend offload not supported\n");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}

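/* All Rx and Tx queue structures below are carved out of one contiguous
 * allocation (mc_q); dpaa2_dev_uninit() frees only priv->rx_vq[0] to release
 * the whole block.
 */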
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int rx_ip_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (eth_conf->rxmode.jumbo_frame == 1) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				PMD_INIT_LOG(ERR,
					     "unable to set mtu. check config\n");
				return ret;
			}
		} else {
			return -1;
		}
	}

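	/* When RSS is requested, program the DPNI's hash-based flow
	 * distribution so the selected header fields spread incoming flows
	 * across the configured Rx queues.
	 */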
	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow distribution."
				     "please check queue config\n");
			return ret;
		}
	}

	if (eth_conf->rxmode.hw_ip_checksum)
		rx_ip_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l4 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l4 csum:Error = %d\n", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n",
				     ret);
			return ret;
		}
	}

	if (eth_conf->rxmode.hw_vlan_filter)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		    dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */
	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14)
		 * (in following order ->DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
			    rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in setting the rx flow"
				     " err : = %d\n", ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id=%d, flow =%d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

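		/* The hardware writes congestion state change notifications
		 * (CSCN) into dpaa2_q->cscn when the TC crosses the enter/exit
		 * thresholds configured below; the Tx path can then check this
		 * memory to detect congestion and back off.
		 */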
		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			PMD_INIT_LOG(ERR,
			   "Error in setting tx congestion notification: = %d",
			   -ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
			rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
			     strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
			     strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error to get flow "
				     "information Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
			     "code = %d\n", ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD,
				"Unable to disable M promisc mode %d\n",
				ret);
	}
}

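/* The DPNI has a single multicast-promiscuous setting shared by promiscuous
 * and allmulticast modes, which is why the disable paths above and below only
 * clear it when the other mode is not active.
 */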
static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			      + VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
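	/* e.g. the standard 1500-byte MTU maps to a 1522-byte max frame
	 * (14-byte Ethernet header + 4-byte VLAN tag + 4-byte CRC).
	 */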
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Adding the MAC ADDR failed: err = %d\n", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Removing the MAC ADDR failed: err = %d\n", ret);
}

static void
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Setting the MAC ADDR failed %d\n", ret);
}

static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL\n");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return 0;

err:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return retcode;
};

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return;
};

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
	return 0;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "DPNI is NULL\n");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
			    dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
		/* todo- we may have to manually cleanup queues. */
	} else {
		PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
			    dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		RTE_LOG(ERR, PMD, "device not configured\n");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
			fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD,
			"Unable to set Link configuration (err=%d)\n",
			ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
		cfg.destination.hold_active = 1;
	}

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.rx_queue_count = dpaa2_dev_rx_queue_count,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once multiple TCs are in use
	 * for Rx processing, this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
		    priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
			     ret);
		goto init_err;
	}

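	/* Both Tx and Tx-confirm/error buffer layouts below only request the
	 * per-frame status word (pass_frame_status); no other annotation or
	 * headroom options are set here.
	 */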
	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free all the queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	RTE_LOG(INFO, PMD, "%s: netdev deleted\n", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);