/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
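/* Note: this helper and its write counterpart below copy the link
 * state with a single 64-bit cmpset, which relies on struct
 * rte_eth_link fitting within one naturally aligned 64-bit word.
 */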
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid = %d",
			    ret, vlan_id, priv->hw_id);

	return ret;
}

static void
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
				ret);
	}
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}
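/* Rx and Tx queue descriptors are carved out of one contiguous
 * allocation: priv->rx_vq[] and priv->tx_vq[] simply point into it,
 * so freeing priv->rx_vq[0] (see dpaa2_dev_uninit) releases the
 * whole block.
 */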
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (eth_conf->rxmode.jumbo_frame == 1) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpaa2_dev_mtu_set(dev,
					eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				PMD_INIT_LOG(ERR,
					     "unable to set mtu. check config\n");
				return ret;
			}
		} else {
			return -1;
		}
	}

	/* Check for correct configuration */
	if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
	    data->nb_rx_queues > 1) {
		PMD_INIT_LOG(ERR, "Distribution is not enabled, "
			     "but Rx queues are more than 1\n");
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Return in case number of Rx queues is 1 */
		if (data->nb_rx_queues == 1)
			return 0;
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow distribution, "
				     "please check queue config\n");
			return ret;
		}
	}
	return 0;
}
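/* Per-queue Rx taildrop (in bytes) and Tx congestion notification
 * (in frames) are enabled by default in the queue setup routines
 * below; they can be opted out of via the DPAA2_RX_TAILDROP_OFF
 * and DPAA2_TX_CGR_OFF bits in priv->flags.
 */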
/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev = %p, queue = %d, pool = %p, conf = %p",
		     dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from the given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");

	if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* The last 6 bits (00 00 00) select annotation, context and
		 * data stashing; setting 01 01 00 (0x14) enables 1 line of
		 * data and 1 line of annotation stashing.
		 */
		cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: err = %d\n",
			     ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
			     rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in setting the rx flow"
				     " err = %d\n", ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}
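/* Tx queues map one-to-one onto traffic classes: tc_id is derived
 * from the queue id and flow_id is fixed at 0, matching the
 * "one queue per Tx TC" model used when sizing nb_tx_queues from
 * the DPNI attributes in dpaa2_dev_init.
 */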
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff)
		return 0;

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id = %d, flow = %d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "Error in setting tx congestion notification: = %d",
				     -ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}
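/* dev_start enables the DPNI, resolves the frame queue ids (FQIDs)
 * backing the Rx rings, turns on Rx/Tx L3 and L4 checksum offload,
 * and routes checksum-error frames to the normal path with the
 * error recorded in the frame annotation.
 */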
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	/* Power up the phy. Needed to make the link go up */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error getting qdid: ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error getting flow "
				     "information: Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting RX l3 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting RX l4 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting TX l3 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting TX l4 csum: Error = %d\n",
			     ret);
		return ret;
	}

	/* On checksum errors, send the frames to the normal path and mark
	 * the error in the frame annotation.
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in dpni_set_errors_behavior: "
			     "code = %d\n", ret);
		return ret;
	}
	/* VLAN Offload Settings */
	if (priv->max_vlan_filters)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	return 0;
}
/**
 * This routine disables all traffic on the adapter by taking the
 * link down and disabling the underlying DPNI object.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n",
			ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD,
				"Unable to disable M promisc mode %d\n",
				ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
}
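/* allmulticast is implemented on top of the DPNI multicast-promiscuous
 * filter; disabling it is skipped while unicast promiscuous mode is on
 * (see the check below), just as promiscuous_disable above leaves
 * multicast promiscuity enabled while all_multicast is set.
 */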
static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token,
					 false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n",
			ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU %d is configured for the device\n", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Adding the MAC ADDR failed: err = %d\n", ret);
	return ret;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Removing the MAC ADDR failed: err = %d\n",
			ret);
}

static void
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Setting the MAC ADDR failed %d\n", ret);
}
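/* DPNI statistics are read page by page: page 0 carries the ingress
 * frame/byte counters, page 1 the egress ones, and page 2 the
 * discard and filter counters used to derive ierrors, oerrors and
 * imissed below.
 */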
static void
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL\n");
		return;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return;

err:
	RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
		retcode);
	return;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
		retcode);
	return;
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down\n",
			    dev->data->port_id);
	return 0;
}
/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "DPNI is NULL\n");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; not continuing */
		PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = 1;

	PMD_DRV_LOG(INFO, "Port %d Link UP successful", dev->data->port_id);
	return ret;
}
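/* Taking the link down first swaps in a dummy Tx burst function so
 * no new frames are enqueued while dpni_disable() drains the egress
 * queues; dpaa2_dev_set_link_up restores the real dpaa2_dev_tx.
 */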
/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
		/* todo- we may have to manually cleanup queues. */
	} else {
		PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
			    dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		RTE_LOG(ERR, PMD, "device not configured\n");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}
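/* Mapping used by dpaa2_flow_ctrl_set below, consistent with the
 * decoding in dpaa2_flow_ctrl_get above:
 *   RTE_FC_FULL     -> PAUSE set,   ASYM_PAUSE clear
 *   RTE_FC_TX_PAUSE -> PAUSE clear, ASYM_PAUSE set
 *   RTE_FC_RX_PAUSE -> PAUSE set,   ASYM_PAUSE set
 *   RTE_FC_NONE     -> PAUSE clear, ASYM_PAUSE clear
 */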
static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable TX side flow control only;
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable RX side flow control only;
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control;
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
			fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD,
			"Unable to set Link configuration (err=%d)\n",
			ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.stats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
};
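/* dpaa2_dev_init opens and resets the DPNI object through the MC
 * portal, sizes the Rx/Tx queue arrays from the DPNI attributes,
 * programs the Tx buffer layouts and finally maps memory for DMA
 * through VFIO.
 */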
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	PMD_DRV_LOG(DEBUG, "RX-TC = %d, nb_rx_queues = %d, nb_tx_queues = %d",
		    priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed: Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}
	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free all the queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memory for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}
static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);