/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer where the link status is saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
			    ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
				ret);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			RTE_LOG(INFO, PMD,
				"VLAN extend offload not supported\n");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}
static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}
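/* Configure the DPNI according to the application's rte_eth_conf:
 * validate and apply the jumbo frame length as MTU, set up RSS based
 * flow distribution if requested, and program the Rx/Tx L3/L4 checksum
 * offloads (Rx depends on hw_ip_checksum, Tx is always enabled).
 */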
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int rx_ip_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (eth_conf->rxmode.jumbo_frame == 1) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpaa2_dev_mtu_set(dev,
					eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				PMD_INIT_LOG(ERR,
					     "unable to set mtu. check config\n");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "unable to set flow distribution, please check queue config\n");
			return ret;
		}
	}

	if (eth_conf->rxmode.hw_ip_checksum)
		rx_ip_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l4 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l4 csum:Error = %d\n", ret);
		return ret;
	}

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		    dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");

	if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14) to enable
		 * 1 line data, 1 line annotation
		 */
		cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
			    rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "Error in setting the rx flow err : = %d\n",
				     ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}
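/* Set up a TX flow: each TX queue is mapped to its own traffic class
 * (tc_id == tx_queue_id, flow 0). TX confirmation is disabled (done once,
 * while configuring queue 0) and, unless DPAA2_TX_CGR_OFF is set,
 * congestion notification is armed on the queue's CSCN area.
 */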
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff)
		return 0;

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Error in setting the tx flow: tc_id=%d, flow =%d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "Error in set tx conf mode settings ErrorCode = %x",
				     ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "Error in setting tx congestion notification: = %d",
				     -ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * DPAA2 link interrupt handler
 *
 * @param param
 *   The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *   void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL, NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
			     strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
			     strerror(-err));

	return err;
}
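/* Bring the device up: enable the DPNI, power up the link, fetch the TX
 * queuing destination id (qdid) and the FQID of every RX queue, configure
 * the error-frame behaviour, apply VLAN filtering when supported, and
 * register the link-status-change interrupt if it was requested.
 */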
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "Error to get flow information Error code = %d\n",
				     ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Error to dpni_set_errors_behavior: code = %d\n",
			     ret);
		return ret;
	}
	/* VLAN Offload Settings */
	if (priv->max_vlan_filters) {
		ret = dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "Error to dpaa2_vlan_offload_set: code = %d\n",
				     ret);
			return ret;
		}
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}
/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni device with error code %d\n",
			     ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD,
				"Unable to disable M promisc mode %d\n",
				ret);
	}
}
static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Adding the MAC ADDR failed: err = %d\n", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Removing the MAC ADDR failed: err = %d\n", ret);
}
static void
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Setting the MAC ADDR failed %d\n", ret);
}

static int
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL\n");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return 0;

err:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return retcode;
}
static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return;
}
/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
	return 0;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "DPNI is NULL\n");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
			    dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
	return ret;
}
/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
		/* todo- we may have to manually cleanup queues. */
	} else {
		PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
			    dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		RTE_LOG(ERR, PMD, "device not configured\n");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
			fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD,
			"Unable to set Link configuration (err=%d)\n",
			ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};
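/* Initialize the DPNI device: open and reset the DPNI object, read its
 * attributes to size the queue and filter tables, allocate the RX/TX queue
 * structures and the MAC address table, fetch the primary MAC address,
 * configure the TX and TX-confirm buffer layouts, and hook up the ethdev
 * ops and burst functions.
 */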
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
		    priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}
	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free all the queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	RTE_LOG(INFO, PMD, "%s: netdev deleted\n", eth_dev->data->name);
	return 0;
}
static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT,
				     "Cannot allocate memzone for private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);