/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

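/*
 * Note: page_id/stats_id above index into the raw counter array returned by
 * dpni_get_statistics() for the corresponding statistics page. For example,
 * "ingress_multicast_frames" (page 0, stat 2) is read in
 * dpaa2_dev_xstats_get() below as value[0].raw.counter[2].
 */
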
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer to be filled with the current link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
			    ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
				ret);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			RTE_LOG(INFO, PMD,
				"VLAN extend offload not supported\n");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int rx_ip_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (eth_conf->rxmode.jumbo_frame == 1) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpaa2_dev_mtu_set(dev,
					eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				PMD_INIT_LOG(ERR,
					     "unable to set mtu. check config\n");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow distribution."
				     "please check queue config\n");
			return ret;
		}
	}

	if (eth_conf->rxmode.hw_ip_checksum)
		rx_ip_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l4 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l4 csum:Error = %d\n", ret);
		return ret;
	}

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		    dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/*Get the flow id from given VQ id*/
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/*if ls2088 or rev2 device, enable the stashing */

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");

	if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14) to enable
		 * 1 line data, 1 line annotation
		 */
		cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/*enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
			    rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in setting the rx flow"
				     " err : = %d\n", ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

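/* Function to setup TX flow information. It maps the queue to a traffic
 * class, disables TX confirmation for the port (done once, while configuring
 * queue 0) and, unless DPAA2_TX_CGR_OFF is set, arms congestion notification
 * on the queue.
 */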
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id=%d, flow =%d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/*Set tx-conf and error configuration*/
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			PMD_INIT_LOG(ERR,
			   "Error in setting tx congestion notification: = %d",
			   -ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/*todo - add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL, NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
			     strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
			     strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error to get flow "
				     "information Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/*checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
			     "code = %d\n", ret);
		return ret;
	}
	/* VLAN Offload Settings */
	if (priv->max_vlan_filters) {
		ret = dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error to dpaa2_vlan_offload_set:"
				     "code = %d\n", ret);
			return ret;
		}
	}

	/* if the interrupts were configured on this devices*/
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/*disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD,
				"Unable to disable M promisc mode %d\n",
				ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
	return 0;
}

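/* MAC filter table management: dpaa2_dev_add_mac_addr and
 * dpaa2_dev_remove_mac_addr add/remove one entry in the DPNI MAC filter
 * table (the ethdev 'index' and 'pool' arguments are unused on add; remove
 * looks the address up in data->mac_addrs[index]), while
 * dpaa2_dev_set_mac_addr replaces the primary MAC address.
 */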
static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Adding the MAC ADDR failed: err = %d\n", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Removing the MAC ADDR failed: err = %d\n", ret);
}

static void
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Setting the MAC ADDR failed %d\n", ret);
}

static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL\n");
		return -EINVAL;
	}

	/*Get Counters from page_0*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/*Get Counters from page_1*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/*Get Counters from page_2*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return 0;

err:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return retcode;
};

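/* Fill the extended statistics array: counters are read from DPNI statistics
 * pages 0-2 and copied out using the page_id/stats_id mapping defined in
 * dpaa2_xstats_strings[] at the top of this file.
 */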
static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	/* Get Counters from page_0*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2*/
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0*/
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1*/
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2*/
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return;
};

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
	return 0;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "DPNI is NULL\n");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
			    dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
		return ret;
	}

	/*changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
		/* todo- we may have to manually cleanup queues. */
	} else {
		PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
			    dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		RTE_LOG(ERR, PMD, "device not configured\n");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
			fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD,
			"Unable to set Link configuration (err=%d)\n",
			ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0*/
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};

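/* Per-port initialization: open and reset the DPNI object behind this ethdev,
 * read its attributes, allocate the Rx/Tx queue structures and MAC address
 * table, configure the TX/TX-confirm buffer layouts and hook up the burst
 * functions.
 */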
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
		    priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}
	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/*free the all queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer*/
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni*/
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	RTE_LOG(INFO, PMD, "%s: netdev deleted\n", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);