/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_ethdev.h"

static struct rte_dpaa2_driver rte_dpaa2_pmd;

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer to be filled with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                  struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &dev->data->dev_link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                   struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &dev->data->dev_link;
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        dev_info->if_index = priv->hw_id;

        dev_info->max_mac_addrs = priv->max_mac_filters;
        dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
        dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
        dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
        dev_info->speed_capa = ETH_LINK_SPEED_1G |
                        ETH_LINK_SPEED_2_5G |
                        ETH_LINK_SPEED_10G;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        uint16_t dist_idx;
        uint32_t vq_id;
        struct dpaa2_queue *mc_q, *mcq;
        uint32_t tot_queues;
        int i;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

        tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
        mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
                          RTE_CACHE_LINE_SIZE);
        if (!mc_q) {
                PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
                return -1;
        }

        for (i = 0; i < priv->nb_rx_queues; i++) {
                mc_q->dev = dev;
                priv->rx_vq[i] = mc_q++;
                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_q->q_storage = rte_malloc("dq_storage",
                                        sizeof(struct queue_storage_info_t),
                                        RTE_CACHE_LINE_SIZE);
                if (!dpaa2_q->q_storage)
                        goto fail;

                memset(dpaa2_q->q_storage, 0,
                       sizeof(struct queue_storage_info_t));
                if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
                        goto fail;
        }

        for (i = 0; i < priv->nb_tx_queues; i++) {
                mc_q->dev = dev;
                mc_q->flow_id = DPNI_NEW_FLOW_ID;
                priv->tx_vq[i] = mc_q++;
        }

        vq_id = 0;
        for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
             dist_idx++) {
                mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
                mcq->tc_index = DPAA2_DEF_TC;
                mcq->flow_id = dist_idx;
                vq_id++;
        }

        return 0;
fail:
        i -= 1;
        mc_q = priv->rx_vq[0];
        while (i >= 0) {
                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_free_dq_storage(dpaa2_q->q_storage);
                rte_free(dpaa2_q->q_storage);
                priv->rx_vq[i--] = NULL;
        }
        rte_free(mc_q);
        return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *data = dev->data;
        struct rte_eth_conf *eth_conf = &data->dev_conf;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* Check for correct configuration */
        if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
            data->nb_rx_queues > 1) {
                PMD_INIT_LOG(ERR, "Distribution is not enabled, "
                             "but more than 1 Rx queue is requested\n");
                return -1;
        }

        if (eth_conf->rxmode.mq_mode
            == ETH_MQ_RX_RSS) {
                /* Return in case number of Rx queues is 1 */
                if (data->nb_rx_queues == 1)
                        return 0;
                ret = dpaa2_setup_flow_dist(dev,
                                eth_conf->rx_adv_conf.rss_conf.rss_hf);
                if (ret) {
                        PMD_INIT_LOG(ERR, "unable to set flow distribution. "
                                     "Please check queue config\n");
                        return ret;
                }
        }
        return 0;
}

/* Function to set up RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc __rte_unused,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_rxconf *rx_conf __rte_unused,
                         struct rte_mempool *mb_pool)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpaa2_queue *dpaa2_q;
        struct dpni_queue cfg;
        uint8_t options = 0;
        uint8_t flow_id;
        uint32_t bpid;
        int ret;

        PMD_INIT_FUNC_TRACE();

        PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
                     dev, rx_queue_id, mb_pool, rx_conf);

        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
                bpid = mempool_to_bpid(mb_pool);
                ret = dpaa2_attach_bp_list(priv,
                                           rte_dpaa2_bpid_info[bpid].bp_list);
                if (ret)
                        return ret;
        }
        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

        /* Get the tc id and flow id from the given VQ id */
        flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
        memset(&cfg, 0, sizeof(struct dpni_queue));

        options = options | DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (uint64_t)(dpaa2_q);

        /* If LS2088 or rev2 device, enable the stashing */
        if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
                options |= DPNI_QUEUE_OPT_FLC;
                cfg.flc.stash_control = true;
                cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
                /* 00 00 00 - last 6 bits represent annotation, context
                 * stashing and data stashing settings; 01 01 00 (0x14)
                 * enables 1 line annotation, 1 line context
                 */
                cfg.flc.value |= 0x14;
        }
        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
                             dpaa2_q->tc_index, flow_id, options, &cfg);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error in setting the rx flow: %d\n", ret);
                return -1;
        }

        dev->data->rx_queues[rx_queue_id] = dpaa2_q;
        return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t tx_queue_id,
                         uint16_t nb_tx_desc __rte_unused,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
                priv->tx_vq[tx_queue_id];
        struct fsl_mc_io *dpni = priv->hw;
        struct dpni_queue tx_conf_cfg;
        struct dpni_queue tx_flow_cfg;
        uint8_t options = 0, flow_id;
        uint32_t tc_id;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* Return if queue already configured */
        if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
                return 0;

        memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
        memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

        if (priv->num_tc == 1) {
                tc_id = 0;
                flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
        } else {
                tc_id = tx_queue_id;
                flow_id = 0;
        }

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
                             DPNI_QUEUE_TX,
                             tc_id, flow_id, options, &tx_flow_cfg);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
                             "tc_id=%d, flow=%d ErrorCode = %x\n",
                             tc_id, flow_id, -ret);
                return -1;
        }

        dpaa2_q->flow_id = flow_id;

        if (tx_queue_id == 0) {
                /* Set tx-conf and error configuration */
                ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
                                                    priv->token,
                                                    DPNI_CONF_DISABLE);
                if (ret) {
                        PMD_INIT_LOG(ERR, "Error in setting tx conf mode:"
                                     " ErrorCode = %x", ret);
                        return -1;
                }
        }
        dpaa2_q->tc_index = tc_id;

        dev->data->tx_queues[tx_queue_id] = dpaa2_q;
        return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                /* todo: add more types */
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV4_EXT,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L3_IPV6_EXT,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == dpaa2_dev_rx)
                return ptypes;
        return NULL;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *data = dev->data;
        struct dpaa2_dev_priv *priv = data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpni_queue cfg;
        struct dpni_error_cfg err_cfg;
        uint16_t qdid;
        struct dpni_queue_id qid;
        struct dpaa2_queue *dpaa2_q;
        int ret, i;

        PMD_INIT_FUNC_TRACE();

        ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
                             ret, priv->hw_id);
                return ret;
        }

        ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
                            DPNI_QUEUE_TX, &qdid);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error getting qdid: ErrorCode = %d\n", ret);
                return ret;
        }
        priv->qdid = qdid;

        for (i = 0; i < data->nb_rx_queues; i++) {
                dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX, dpaa2_q->tc_index,
                                     dpaa2_q->flow_id, &cfg, &qid);
                if (ret) {
                        PMD_INIT_LOG(ERR, "Error getting flow "
                                     "information: Error code = %d\n", ret);
                        return ret;
                }
                dpaa2_q->fqid = qid.fqid;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L3_CSUM, true);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error setting RX l3 csum: Error = %d\n",
                             ret);
                return ret;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L4_CSUM, true);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error setting RX l4 csum: Error = %d\n",
                             ret);
                return ret;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L3_CSUM, true);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error setting TX l3 csum: Error = %d\n",
                             ret);
                return ret;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L4_CSUM, true);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error setting TX l4 csum: Error = %d\n",
                             ret);
                return ret;
        }

        /* On checksum errors, send the frames to the normal path and set
         * the error bit in the frame annotation
         */
        err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

        err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
        err_cfg.set_frame_annotation = true;

        ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
                                       priv->token, &err_cfg);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error in dpni_set_errors_behavior:"
                             " code = %d\n", ret);
                return ret;
        }

        return 0;
}

/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int ret;
        struct rte_eth_link link;

        PMD_INIT_FUNC_TRACE();

        ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
                             ret, priv->hw_id);
                return;
        }

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* Clean the device first */
        ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
                             " error code %d\n", ret);
                return;
        }
}

static void
dpaa2_dev_promiscuous_enable(
                struct rte_eth_dev *dev)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                RTE_LOG(ERR, PMD, "dpni is NULL");
                return;
        }

        ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
                struct rte_eth_dev *dev)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                RTE_LOG(ERR, PMD, "dpni is NULL");
                return;
        }

        ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                RTE_LOG(ERR, PMD, "dpni is NULL");
                return -EINVAL;
        }

        /* check that mtu is within the allowed range */
        if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
                return -EINVAL;

        /* Set the Max Rx frame length as 'mtu' +
         * Maximum Ethernet header length
         */
        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
                                        mtu + ETH_VLAN_HLEN);
        if (ret) {
                PMD_DRV_LOG(ERR, "setting the max frame length failed");
                return -1;
        }
        PMD_DRV_LOG(INFO, "MTU configured for the device: %d\n", mtu);
        return 0;
}

static
void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
                         struct rte_eth_stats *stats)
{
        struct dpaa2_dev_priv *priv =
                        dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int32_t retcode;
        uint8_t page0 = 0, page1 = 1, page2 = 2;
        union dpni_statistics value;

        memset(&value, 0, sizeof(union dpni_statistics));

        PMD_INIT_FUNC_TRACE();

        if (!dpni) {
                RTE_LOG(ERR, PMD, "dpni is NULL");
                return;
        }

        if (!stats) {
                RTE_LOG(ERR, PMD, "stats is NULL");
                return;
        }

        /* Get counters from page_0 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page0, &value);
        if (retcode)
                goto err;

        stats->ipackets = value.page_0.ingress_all_frames;
        stats->ibytes = value.page_0.ingress_all_bytes;

        /* Get counters from page_1 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page1, &value);
        if (retcode)
                goto err;

        stats->opackets = value.page_1.egress_all_frames;
        stats->obytes = value.page_1.egress_all_bytes;

        /* Get counters from page_2 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page2, &value);
        if (retcode)
                goto err;

        stats->ierrors = value.page_2.ingress_discarded_frames;
        stats->oerrors = value.page_2.egress_discarded_frames;
        stats->imissed = value.page_2.ingress_nobuffer_discards;

        return;

err:
        RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
                retcode);
        return;
}

static
void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int32_t retcode;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                RTE_LOG(ERR, PMD, "dpni is NULL");
                return;
        }

        retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
        if (retcode)
                goto error;

        return;

error:
        RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
                retcode);
        return;
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
                      int wait_to_complete __rte_unused)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct rte_eth_link link, old;
        struct dpni_link_state state = {0};

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                RTE_LOG(ERR, PMD, "error: dpni is NULL");
                return 0;
        }
        memset(&old, 0, sizeof(old));
        dpaa2_dev_atomic_read_link_status(dev, &old);

        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
        if (ret < 0) {
                RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
                return -1;
        }

        if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
                RTE_LOG(DEBUG, PMD, "No change in status\n");
                return -1;
        }

        memset(&link, 0, sizeof(struct rte_eth_link));
        link.link_status = state.up;
        link.link_speed = state.rate;

        if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
        else
                link.link_duplex = ETH_LINK_FULL_DUPLEX;

        dpaa2_dev_atomic_write_link_status(dev, &link);

        if (link.link_status)
                PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
        else
                PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
        return 0;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure = dpaa2_eth_dev_configure,
        .dev_start =
                        dpaa2_dev_start,
        .dev_stop = dpaa2_dev_stop,
        .dev_close = dpaa2_dev_close,
        .promiscuous_enable = dpaa2_dev_promiscuous_enable,
        .promiscuous_disable = dpaa2_dev_promiscuous_disable,
        .link_update = dpaa2_dev_link_update,
        .stats_get = dpaa2_dev_stats_get,
        .stats_reset = dpaa2_dev_stats_reset,
        .dev_infos_get = dpaa2_dev_info_get,
        .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
        .mtu_set = dpaa2_dev_mtu_set,
        .rx_queue_setup = dpaa2_dev_rx_queue_setup,
        .rx_queue_release = dpaa2_dev_rx_queue_release,
        .tx_queue_setup = dpaa2_dev_tx_queue_setup,
        .tx_queue_release = dpaa2_dev_tx_queue_release,
};

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_device *dev = eth_dev->device;
        struct rte_dpaa2_device *dpaa2_dev;
        struct fsl_mc_io *dpni_dev;
        struct dpni_attr attr;
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct dpni_buffer_layout layout;
        int i, ret, hw_id;
        int tot_size;

        PMD_INIT_FUNC_TRACE();

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

        hw_id = dpaa2_dev->object_id;

        dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
        if (!dpni_dev) {
                PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
                return -1;
        }

        dpni_dev->regs = rte_mcp_ptr_list[0];
        ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
                             " error code %d\n", hw_id, ret);
                return -1;
        }

        /* Clean the device first */
        ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
                             " error code %d\n", hw_id, ret);
                return -1;
        }

        ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute,"
                             " error code %d\n", hw_id, ret);
                return -1;
        }

        priv->num_tc = attr.num_tcs;
        for (i = 0; i < attr.num_tcs; i++) {
                priv->num_dist_per_tc[i] = attr.num_queues;
                break;
        }

        /* Distribution is per TC only,
         * so choosing RX queues from default TC only
         */
        priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];

        if (attr.num_tcs == 1)
                priv->nb_tx_queues = attr.num_queues;
        else
                priv->nb_tx_queues = attr.num_tcs;

        PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
        PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);

        priv->hw = dpni_dev;
        priv->hw_id = hw_id;
        priv->options = attr.options;
        priv->max_mac_filters = attr.mac_filter_entries;
        priv->max_vlan_filters = attr.vlan_filter_entries;
        priv->flags = 0;

        /* Allocate memory for hardware structure for queues */
        ret = dpaa2_alloc_rx_tx_queues(eth_dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
                return -ret;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("dpni",
                ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                             "store MAC addresses",
                             ETHER_ADDR_LEN * attr.mac_filter_entries);
                return -ENOMEM;
        }

        ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
                        priv->token,
                        (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
        if (ret) {
                PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
                             " Error Code = %d\n", ret);
                return -ret;
        }

        /* ... rx buffer layout ... */
        tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
        tot_size = RTE_ALIGN_CEIL(tot_size,
                                  DPAA2_PACKET_LAYOUT_ALIGN);

        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                         DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
                         DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
                         DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;

        layout.pass_frame_status = 1;
        layout.data_head_room = tot_size
                - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
        layout.private_data_size = DPAA2_FD_PTA_SIZE;
        layout.pass_parser_result = 1;
        PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
                     tot_size, layout.data_head_room,
                     layout.private_data_size);
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX, &layout);
        if (ret) {
                PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
                return -1;
        }

        /* ... tx buffer layout ... */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        layout.pass_frame_status = 1;
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_TX, &layout);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
                             " layout", ret);
                return -1;
        }

        /* ... tx-conf and error buffer layout ... */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        layout.pass_frame_status = 1;
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_TX_CONFIRM, &layout);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
                             " layout", ret);
                return -1;
        }

        eth_dev->dev_ops = &dpaa2_ethdev_ops;
        eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;

        eth_dev->rx_pkt_burst = dpaa2_dev_rx;
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
        rte_fslmc_vfio_dmamap();

        return 0;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int i, ret;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        if (!dpni) {
                PMD_INIT_LOG(WARNING, "Already closed or not started");
                return -1;
        }

        dpaa2_dev_close(eth_dev);

        if (priv->rx_vq[0]) {
                /* cleaning up queue storage */
                for (i = 0; i < priv->nb_rx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                        if (dpaa2_q->q_storage)
                                rte_free(dpaa2_q->q_storage);
                }
                /* free all queue memory */
                rte_free(priv->rx_vq[0]);
                priv->rx_vq[0] = NULL;
        }

        /* Free the memory allocated for storing MAC addresses */
        if (eth_dev->data->mac_addrs) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
        }

        /* Close the device at underlying layer */
        ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failure closing dpni device with"
                             " error code %d\n", ret);
        }

        /* Free the allocated memory for ethernet private data and dpni */
        priv->hw = NULL;
        free(dpni);

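        /* Detach the ethdev callbacks so the released port can no longer
         * be driven through this PMD.
         */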
        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
                struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);

        eth_dev = rte_eth_dev_allocate(ethdev_name);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc(
                                                "ethdev private structure",
                                                sizeof(struct dpaa2_dev_priv),
                                                RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL) {
                        PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
                                     " private port data\n");
                        rte_eth_dev_release_port(eth_dev);
                        return -ENOMEM;
                }
        }
        eth_dev->device = &dpaa2_dev->device;
        dpaa2_dev->eth_dev = eth_dev;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
        if (diag == 0)
                return 0;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;

        eth_dev = dpaa2_dev->eth_dev;
        dpaa2_dev_uninit(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
        .drv_type = DPAA2_MC_DPNI_DEVID,
        .probe = rte_dpaa2_probe,
        .remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);