1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 5 * Copyright (c) 2016 NXP. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Freescale Semiconductor, Inc nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <time.h> 35 #include <net/if.h> 36 37 #include <rte_mbuf.h> 38 #include <rte_ethdev.h> 39 #include <rte_malloc.h> 40 #include <rte_memcpy.h> 41 #include <rte_string_fns.h> 42 #include <rte_cycles.h> 43 #include <rte_kvargs.h> 44 #include <rte_dev.h> 45 #include <rte_ethdev.h> 46 #include <rte_fslmc.h> 47 48 #include <fslmc_logs.h> 49 #include <fslmc_vfio.h> 50 #include <dpaa2_hw_pvt.h> 51 #include <dpaa2_hw_mempool.h> 52 #include <dpaa2_hw_dpio.h> 53 #include <mc/fsl_dpmng.h> 54 #include "dpaa2_ethdev.h" 55 56 static struct rte_dpaa2_driver rte_dpaa2_pmd; 57 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); 58 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); 59 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); 60 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 61 62 /** 63 * Atomically reads the link status information from global 64 * structure rte_eth_dev. 65 * 66 * @param dev 67 * - Pointer to the structure rte_eth_dev to read from. 68 * - Pointer to the buffer to be saved with the link status. 69 * 70 * @return 71 * - On success, zero. 72 * - On failure, negative value. 73 */ 74 static inline int 75 dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev, 76 struct rte_eth_link *link) 77 { 78 struct rte_eth_link *dst = link; 79 struct rte_eth_link *src = &dev->data->dev_link; 80 81 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 82 *(uint64_t *)src) == 0) 83 return -1; 84 85 return 0; 86 } 87 88 /** 89 * Atomically writes the link status information into global 90 * structure rte_eth_dev. 91 * 92 * @param dev 93 * - Pointer to the structure rte_eth_dev to read from. 94 * - Pointer to the buffer to be saved with the link status. 95 * 96 * @return 97 * - On success, zero. 98 * - On failure, negative value. 99 */ 100 static inline int 101 dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev, 102 struct rte_eth_link *link) 103 { 104 struct rte_eth_link *dst = &dev->data->dev_link; 105 struct rte_eth_link *src = link; 106 107 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 108 *(uint64_t *)src) == 0) 109 return -1; 110 111 return 0; 112 } 113 114 static int 115 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 116 { 117 int ret; 118 struct dpaa2_dev_priv *priv = dev->data->dev_private; 119 struct fsl_mc_io *dpni = priv->hw; 120 121 PMD_INIT_FUNC_TRACE(); 122 123 if (dpni == NULL) { 124 RTE_LOG(ERR, PMD, "dpni is NULL"); 125 return -1; 126 } 127 128 if (on) 129 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, 130 priv->token, vlan_id); 131 else 132 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, 133 priv->token, vlan_id); 134 135 if (ret < 0) 136 PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d", 137 ret, vlan_id, priv->hw_id); 138 139 return ret; 140 } 141 142 static void 143 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) 144 { 145 struct dpaa2_dev_priv *priv = dev->data->dev_private; 146 struct fsl_mc_io *dpni = priv->hw; 147 int ret; 148 149 PMD_INIT_FUNC_TRACE(); 150 151 if (mask & ETH_VLAN_FILTER_MASK) { 152 if (dev->data->dev_conf.rxmode.hw_vlan_filter) 153 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 154 priv->token, true); 155 else 156 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 157 priv->token, false); 158 if (ret < 0) 159 RTE_LOG(ERR, PMD, "Unable to set vlan filter ret = %d", 160 ret); 161 } 162 } 163 164 static int 165 dpaa2_fw_version_get(struct rte_eth_dev *dev, 166 char *fw_version, 167 size_t fw_size) 168 { 169 int ret; 170 struct dpaa2_dev_priv *priv = dev->data->dev_private; 171 struct fsl_mc_io *dpni = priv->hw; 172 struct mc_soc_version mc_plat_info = {0}; 173 struct mc_version mc_ver_info = {0}; 174 175 PMD_INIT_FUNC_TRACE(); 176 177 if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) 178 RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n"); 179 180 if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) 181 RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n"); 182 183 ret = snprintf(fw_version, fw_size, 184 "%x-%d.%d.%d", 185 mc_plat_info.svr, 186 mc_ver_info.major, 187 mc_ver_info.minor, 188 mc_ver_info.revision); 189 190 ret += 1; /* add the size of '\0' */ 191 if (fw_size < (uint32_t)ret) 192 return ret; 193 else 194 return 0; 195 } 196 197 static void 198 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 199 { 200 struct dpaa2_dev_priv *priv = dev->data->dev_private; 201 202 PMD_INIT_FUNC_TRACE(); 203 204 dev_info->if_index = priv->hw_id; 205 206 dev_info->max_mac_addrs = priv->max_mac_filters; 207 dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; 208 dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; 209 dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; 210 dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; 211 dev_info->rx_offload_capa = 212 DEV_RX_OFFLOAD_IPV4_CKSUM | 213 DEV_RX_OFFLOAD_UDP_CKSUM | 214 DEV_RX_OFFLOAD_TCP_CKSUM | 215 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; 216 dev_info->tx_offload_capa = 217 DEV_TX_OFFLOAD_IPV4_CKSUM | 218 DEV_TX_OFFLOAD_UDP_CKSUM | 219 DEV_TX_OFFLOAD_TCP_CKSUM | 220 DEV_TX_OFFLOAD_SCTP_CKSUM | 221 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 222 dev_info->speed_capa = ETH_LINK_SPEED_1G | 223 ETH_LINK_SPEED_2_5G | 224 ETH_LINK_SPEED_10G; 225 } 226 227 static int 228 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) 229 { 230 struct dpaa2_dev_priv *priv = dev->data->dev_private; 231 uint16_t dist_idx; 232 uint32_t vq_id; 233 struct dpaa2_queue *mc_q, *mcq; 234 uint32_t tot_queues; 235 int i; 236 struct dpaa2_queue *dpaa2_q; 237 238 PMD_INIT_FUNC_TRACE(); 239 240 tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; 241 mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, 242 RTE_CACHE_LINE_SIZE); 243 if (!mc_q) { 244 PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n"); 245 return -1; 246 } 247 248 for (i = 0; i < priv->nb_rx_queues; i++) { 249 mc_q->dev = dev; 250 priv->rx_vq[i] = mc_q++; 251 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 252 dpaa2_q->q_storage = rte_malloc("dq_storage", 253 sizeof(struct queue_storage_info_t), 254 RTE_CACHE_LINE_SIZE); 255 if (!dpaa2_q->q_storage) 256 goto fail; 257 258 memset(dpaa2_q->q_storage, 0, 259 sizeof(struct queue_storage_info_t)); 260 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) 261 goto fail; 262 } 263 264 for (i = 0; i < priv->nb_tx_queues; i++) { 265 mc_q->dev = dev; 266 mc_q->flow_id = 0xffff; 267 priv->tx_vq[i] = mc_q++; 268 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 269 dpaa2_q->cscn = rte_malloc(NULL, 270 sizeof(struct qbman_result), 16); 271 if (!dpaa2_q->cscn) 272 goto fail_tx; 273 } 274 275 vq_id = 0; 276 for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { 277 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; 278 mcq->tc_index = DPAA2_DEF_TC; 279 mcq->flow_id = dist_idx; 280 vq_id++; 281 } 282 283 return 0; 284 fail_tx: 285 i -= 1; 286 while (i >= 0) { 287 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 288 rte_free(dpaa2_q->cscn); 289 priv->tx_vq[i--] = NULL; 290 } 291 i = priv->nb_rx_queues; 292 fail: 293 i -= 1; 294 mc_q = priv->rx_vq[0]; 295 while (i >= 0) { 296 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 297 dpaa2_free_dq_storage(dpaa2_q->q_storage); 298 rte_free(dpaa2_q->q_storage); 299 priv->rx_vq[i--] = NULL; 300 } 301 rte_free(mc_q); 302 return -1; 303 } 304 305 static int 306 dpaa2_eth_dev_configure(struct rte_eth_dev *dev) 307 { 308 struct rte_eth_dev_data *data = dev->data; 309 struct rte_eth_conf *eth_conf = &data->dev_conf; 310 int ret; 311 312 PMD_INIT_FUNC_TRACE(); 313 314 if (eth_conf->rxmode.jumbo_frame == 1) { 315 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { 316 ret = dpaa2_dev_mtu_set(dev, 317 eth_conf->rxmode.max_rx_pkt_len); 318 if (ret) { 319 PMD_INIT_LOG(ERR, 320 "unable to set mtu. check config\n"); 321 return ret; 322 } 323 } else { 324 return -1; 325 } 326 } 327 328 /* Check for correct configuration */ 329 if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS && 330 data->nb_rx_queues > 1) { 331 PMD_INIT_LOG(ERR, "Distribution is not enabled, " 332 "but Rx queues more than 1\n"); 333 return -1; 334 } 335 336 if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { 337 /* Return in case number of Rx queues is 1 */ 338 if (data->nb_rx_queues == 1) 339 return 0; 340 ret = dpaa2_setup_flow_dist(dev, 341 eth_conf->rx_adv_conf.rss_conf.rss_hf); 342 if (ret) { 343 PMD_INIT_LOG(ERR, "unable to set flow distribution." 344 "please check queue config\n"); 345 return ret; 346 } 347 } 348 return 0; 349 } 350 351 /* Function to setup RX flow information. It contains traffic class ID, 352 * flow ID, destination configuration etc. 353 */ 354 static int 355 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, 356 uint16_t rx_queue_id, 357 uint16_t nb_rx_desc __rte_unused, 358 unsigned int socket_id __rte_unused, 359 const struct rte_eth_rxconf *rx_conf __rte_unused, 360 struct rte_mempool *mb_pool) 361 { 362 struct dpaa2_dev_priv *priv = dev->data->dev_private; 363 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 364 struct mc_soc_version mc_plat_info = {0}; 365 struct dpaa2_queue *dpaa2_q; 366 struct dpni_queue cfg; 367 uint8_t options = 0; 368 uint8_t flow_id; 369 uint32_t bpid; 370 int ret; 371 372 PMD_INIT_FUNC_TRACE(); 373 374 PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p", 375 dev, rx_queue_id, mb_pool, rx_conf); 376 377 if (!priv->bp_list || priv->bp_list->mp != mb_pool) { 378 bpid = mempool_to_bpid(mb_pool); 379 ret = dpaa2_attach_bp_list(priv, 380 rte_dpaa2_bpid_info[bpid].bp_list); 381 if (ret) 382 return ret; 383 } 384 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 385 dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ 386 387 /*Get the flow id from given VQ id*/ 388 flow_id = rx_queue_id % priv->nb_rx_queues; 389 memset(&cfg, 0, sizeof(struct dpni_queue)); 390 391 options = options | DPNI_QUEUE_OPT_USER_CTX; 392 cfg.user_context = (uint64_t)(dpaa2_q); 393 394 /*if ls2088 or rev2 device, enable the stashing */ 395 396 if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) 397 PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n"); 398 399 if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) { 400 options |= DPNI_QUEUE_OPT_FLC; 401 cfg.flc.stash_control = true; 402 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; 403 /* 00 00 00 - last 6 bit represent annotation, context stashing, 404 * data stashing setting 01 01 00 (0x14) to enable 405 * 1 line data, 1 line annotation 406 */ 407 cfg.flc.value |= 0x14; 408 } 409 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, 410 dpaa2_q->tc_index, flow_id, options, &cfg); 411 if (ret) { 412 PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret); 413 return -1; 414 } 415 416 if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { 417 struct dpni_taildrop taildrop; 418 419 taildrop.enable = 1; 420 /*enabling per rx queue congestion control */ 421 taildrop.threshold = CONG_THRESHOLD_RX_Q; 422 taildrop.units = DPNI_CONGESTION_UNIT_BYTES; 423 PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d", 424 rx_queue_id); 425 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 426 DPNI_CP_QUEUE, DPNI_QUEUE_RX, 427 dpaa2_q->tc_index, flow_id, &taildrop); 428 if (ret) { 429 PMD_INIT_LOG(ERR, "Error in setting the rx flow" 430 " err : = %d\n", ret); 431 return -1; 432 } 433 } 434 435 dev->data->rx_queues[rx_queue_id] = dpaa2_q; 436 return 0; 437 } 438 439 static int 440 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 441 uint16_t tx_queue_id, 442 uint16_t nb_tx_desc __rte_unused, 443 unsigned int socket_id __rte_unused, 444 const struct rte_eth_txconf *tx_conf __rte_unused) 445 { 446 struct dpaa2_dev_priv *priv = dev->data->dev_private; 447 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 448 priv->tx_vq[tx_queue_id]; 449 struct fsl_mc_io *dpni = priv->hw; 450 struct dpni_queue tx_conf_cfg; 451 struct dpni_queue tx_flow_cfg; 452 uint8_t options = 0, flow_id; 453 uint32_t tc_id; 454 int ret; 455 456 PMD_INIT_FUNC_TRACE(); 457 458 /* Return if queue already configured */ 459 if (dpaa2_q->flow_id != 0xffff) 460 return 0; 461 462 memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 463 memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 464 465 tc_id = tx_queue_id; 466 flow_id = 0; 467 468 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, 469 tc_id, flow_id, options, &tx_flow_cfg); 470 if (ret) { 471 PMD_INIT_LOG(ERR, "Error in setting the tx flow: " 472 "tc_id=%d, flow =%d ErrorCode = %x\n", 473 tc_id, flow_id, -ret); 474 return -1; 475 } 476 477 dpaa2_q->flow_id = flow_id; 478 479 if (tx_queue_id == 0) { 480 /*Set tx-conf and error configuration*/ 481 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 482 priv->token, 483 DPNI_CONF_DISABLE); 484 if (ret) { 485 PMD_INIT_LOG(ERR, "Error in set tx conf mode settings" 486 " ErrorCode = %x", ret); 487 return -1; 488 } 489 } 490 dpaa2_q->tc_index = tc_id; 491 492 if (priv->flags & DPAA2_TX_CGR_SUPPORT) { 493 struct dpni_congestion_notification_cfg cong_notif_cfg; 494 495 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; 496 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; 497 /* Notify that the queue is not congested when the data in 498 * the queue is below this thershold. 499 */ 500 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; 501 cong_notif_cfg.message_ctx = 0; 502 cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn; 503 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; 504 cong_notif_cfg.notification_mode = 505 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | 506 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | 507 DPNI_CONG_OPT_COHERENT_WRITE; 508 509 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, 510 priv->token, 511 DPNI_QUEUE_TX, 512 tc_id, 513 &cong_notif_cfg); 514 if (ret) { 515 PMD_INIT_LOG(ERR, 516 "Error in setting tx congestion notification: = %d", 517 -ret); 518 return -ret; 519 } 520 } 521 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 522 return 0; 523 } 524 525 static void 526 dpaa2_dev_rx_queue_release(void *q __rte_unused) 527 { 528 PMD_INIT_FUNC_TRACE(); 529 } 530 531 static void 532 dpaa2_dev_tx_queue_release(void *q __rte_unused) 533 { 534 PMD_INIT_FUNC_TRACE(); 535 } 536 537 static const uint32_t * 538 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) 539 { 540 static const uint32_t ptypes[] = { 541 /*todo -= add more types */ 542 RTE_PTYPE_L2_ETHER, 543 RTE_PTYPE_L3_IPV4, 544 RTE_PTYPE_L3_IPV4_EXT, 545 RTE_PTYPE_L3_IPV6, 546 RTE_PTYPE_L3_IPV6_EXT, 547 RTE_PTYPE_L4_TCP, 548 RTE_PTYPE_L4_UDP, 549 RTE_PTYPE_L4_SCTP, 550 RTE_PTYPE_L4_ICMP, 551 RTE_PTYPE_UNKNOWN 552 }; 553 554 if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx) 555 return ptypes; 556 return NULL; 557 } 558 559 static int 560 dpaa2_dev_start(struct rte_eth_dev *dev) 561 { 562 struct rte_eth_dev_data *data = dev->data; 563 struct dpaa2_dev_priv *priv = data->dev_private; 564 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 565 struct dpni_queue cfg; 566 struct dpni_error_cfg err_cfg; 567 uint16_t qdid; 568 struct dpni_queue_id qid; 569 struct dpaa2_queue *dpaa2_q; 570 int ret, i; 571 572 PMD_INIT_FUNC_TRACE(); 573 574 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 575 if (ret) { 576 PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n", 577 ret, priv->hw_id); 578 return ret; 579 } 580 581 /* Power up the phy. Needed to make the link go Up */ 582 dpaa2_dev_set_link_up(dev); 583 584 ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, 585 DPNI_QUEUE_TX, &qdid); 586 if (ret) { 587 PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret); 588 return ret; 589 } 590 priv->qdid = qdid; 591 592 for (i = 0; i < data->nb_rx_queues; i++) { 593 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; 594 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 595 DPNI_QUEUE_RX, dpaa2_q->tc_index, 596 dpaa2_q->flow_id, &cfg, &qid); 597 if (ret) { 598 PMD_INIT_LOG(ERR, "Error to get flow " 599 "information Error code = %d\n", ret); 600 return ret; 601 } 602 dpaa2_q->fqid = qid.fqid; 603 } 604 605 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 606 DPNI_OFF_RX_L3_CSUM, true); 607 if (ret) { 608 PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret); 609 return ret; 610 } 611 612 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 613 DPNI_OFF_RX_L4_CSUM, true); 614 if (ret) { 615 PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret); 616 return ret; 617 } 618 619 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 620 DPNI_OFF_TX_L3_CSUM, true); 621 if (ret) { 622 PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret); 623 return ret; 624 } 625 626 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 627 DPNI_OFF_TX_L4_CSUM, true); 628 if (ret) { 629 PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret); 630 return ret; 631 } 632 633 /*checksum errors, send them to normal path and set it in annotation */ 634 err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; 635 636 err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; 637 err_cfg.set_frame_annotation = true; 638 639 ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, 640 priv->token, &err_cfg); 641 if (ret) { 642 PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:" 643 "code = %d\n", ret); 644 return ret; 645 } 646 /* VLAN Offload Settings */ 647 if (priv->max_vlan_filters) 648 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); 649 650 return 0; 651 } 652 653 /** 654 * This routine disables all traffic on the adapter by issuing a 655 * global reset on the MAC. 656 */ 657 static void 658 dpaa2_dev_stop(struct rte_eth_dev *dev) 659 { 660 struct dpaa2_dev_priv *priv = dev->data->dev_private; 661 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 662 int ret; 663 struct rte_eth_link link; 664 665 PMD_INIT_FUNC_TRACE(); 666 667 dpaa2_dev_set_link_down(dev); 668 669 ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); 670 if (ret) { 671 PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n", 672 ret, priv->hw_id); 673 return; 674 } 675 676 /* clear the recorded link status */ 677 memset(&link, 0, sizeof(link)); 678 dpaa2_dev_atomic_write_link_status(dev, &link); 679 } 680 681 static void 682 dpaa2_dev_close(struct rte_eth_dev *dev) 683 { 684 struct rte_eth_dev_data *data = dev->data; 685 struct dpaa2_dev_priv *priv = dev->data->dev_private; 686 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 687 int i, ret; 688 struct rte_eth_link link; 689 struct dpaa2_queue *dpaa2_q; 690 691 PMD_INIT_FUNC_TRACE(); 692 693 for (i = 0; i < data->nb_tx_queues; i++) { 694 dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i]; 695 if (!dpaa2_q->cscn) { 696 rte_free(dpaa2_q->cscn); 697 dpaa2_q->cscn = NULL; 698 } 699 } 700 701 /* Clean the device first */ 702 ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); 703 if (ret) { 704 PMD_INIT_LOG(ERR, "Failure cleaning dpni device with" 705 " error code %d\n", ret); 706 return; 707 } 708 709 memset(&link, 0, sizeof(link)); 710 dpaa2_dev_atomic_write_link_status(dev, &link); 711 } 712 713 static void 714 dpaa2_dev_promiscuous_enable( 715 struct rte_eth_dev *dev) 716 { 717 int ret; 718 struct dpaa2_dev_priv *priv = dev->data->dev_private; 719 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 720 721 PMD_INIT_FUNC_TRACE(); 722 723 if (dpni == NULL) { 724 RTE_LOG(ERR, PMD, "dpni is NULL"); 725 return; 726 } 727 728 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 729 if (ret < 0) 730 RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d", ret); 731 732 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 733 if (ret < 0) 734 RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d", ret); 735 } 736 737 static void 738 dpaa2_dev_promiscuous_disable( 739 struct rte_eth_dev *dev) 740 { 741 int ret; 742 struct dpaa2_dev_priv *priv = dev->data->dev_private; 743 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 744 745 PMD_INIT_FUNC_TRACE(); 746 747 if (dpni == NULL) { 748 RTE_LOG(ERR, PMD, "dpni is NULL"); 749 return; 750 } 751 752 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 753 if (ret < 0) 754 RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d", ret); 755 756 if (dev->data->all_multicast == 0) { 757 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, 758 priv->token, false); 759 if (ret < 0) 760 RTE_LOG(ERR, PMD, "Unable to disable M promisc mode %d", 761 ret); 762 } 763 } 764 765 static void 766 dpaa2_dev_allmulticast_enable( 767 struct rte_eth_dev *dev) 768 { 769 int ret; 770 struct dpaa2_dev_priv *priv = dev->data->dev_private; 771 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 772 773 PMD_INIT_FUNC_TRACE(); 774 775 if (dpni == NULL) { 776 RTE_LOG(ERR, PMD, "dpni is NULL"); 777 return; 778 } 779 780 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 781 if (ret < 0) 782 RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d", ret); 783 } 784 785 static void 786 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 787 { 788 int ret; 789 struct dpaa2_dev_priv *priv = dev->data->dev_private; 790 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 791 792 PMD_INIT_FUNC_TRACE(); 793 794 if (dpni == NULL) { 795 RTE_LOG(ERR, PMD, "dpni is NULL"); 796 return; 797 } 798 799 /* must remain on for all promiscuous */ 800 if (dev->data->promiscuous == 1) 801 return; 802 803 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 804 if (ret < 0) 805 RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d", ret); 806 } 807 808 static int 809 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 810 { 811 int ret; 812 struct dpaa2_dev_priv *priv = dev->data->dev_private; 813 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 814 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 815 816 PMD_INIT_FUNC_TRACE(); 817 818 if (dpni == NULL) { 819 RTE_LOG(ERR, PMD, "dpni is NULL"); 820 return -EINVAL; 821 } 822 823 /* check that mtu is within the allowed range */ 824 if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) 825 return -EINVAL; 826 827 if (frame_size > ETHER_MAX_LEN) 828 dev->data->dev_conf.rxmode.jumbo_frame = 1; 829 else 830 dev->data->dev_conf.rxmode.jumbo_frame = 0; 831 832 /* Set the Max Rx frame length as 'mtu' + 833 * Maximum Ethernet header length 834 */ 835 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 836 mtu + ETH_VLAN_HLEN); 837 if (ret) { 838 PMD_DRV_LOG(ERR, "setting the max frame length failed"); 839 return -1; 840 } 841 PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu); 842 return 0; 843 } 844 845 static int 846 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 847 struct ether_addr *addr, 848 __rte_unused uint32_t index, 849 __rte_unused uint32_t pool) 850 { 851 int ret; 852 struct dpaa2_dev_priv *priv = dev->data->dev_private; 853 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 854 855 PMD_INIT_FUNC_TRACE(); 856 857 if (dpni == NULL) { 858 RTE_LOG(ERR, PMD, "dpni is NULL"); 859 return -1; 860 } 861 862 ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, 863 priv->token, addr->addr_bytes); 864 if (ret) 865 RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:" 866 " err = %d", ret); 867 return 0; 868 } 869 870 static void 871 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 872 uint32_t index) 873 { 874 int ret; 875 struct dpaa2_dev_priv *priv = dev->data->dev_private; 876 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 877 struct rte_eth_dev_data *data = dev->data; 878 struct ether_addr *macaddr; 879 880 PMD_INIT_FUNC_TRACE(); 881 882 macaddr = &data->mac_addrs[index]; 883 884 if (dpni == NULL) { 885 RTE_LOG(ERR, PMD, "dpni is NULL"); 886 return; 887 } 888 889 ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 890 priv->token, macaddr->addr_bytes); 891 if (ret) 892 RTE_LOG(ERR, PMD, "error: Removing the MAC ADDR failed:" 893 " err = %d", ret); 894 } 895 896 static void 897 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 898 struct ether_addr *addr) 899 { 900 int ret; 901 struct dpaa2_dev_priv *priv = dev->data->dev_private; 902 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 903 904 PMD_INIT_FUNC_TRACE(); 905 906 if (dpni == NULL) { 907 RTE_LOG(ERR, PMD, "dpni is NULL"); 908 return; 909 } 910 911 ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 912 priv->token, addr->addr_bytes); 913 914 if (ret) 915 RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret); 916 } 917 static 918 void dpaa2_dev_stats_get(struct rte_eth_dev *dev, 919 struct rte_eth_stats *stats) 920 { 921 struct dpaa2_dev_priv *priv = dev->data->dev_private; 922 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 923 int32_t retcode; 924 uint8_t page0 = 0, page1 = 1, page2 = 2; 925 union dpni_statistics value; 926 927 memset(&value, 0, sizeof(union dpni_statistics)); 928 929 PMD_INIT_FUNC_TRACE(); 930 931 if (!dpni) { 932 RTE_LOG(ERR, PMD, "dpni is NULL"); 933 return; 934 } 935 936 if (!stats) { 937 RTE_LOG(ERR, PMD, "stats is NULL"); 938 return; 939 } 940 941 /*Get Counters from page_0*/ 942 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 943 page0, &value); 944 if (retcode) 945 goto err; 946 947 stats->ipackets = value.page_0.ingress_all_frames; 948 stats->ibytes = value.page_0.ingress_all_bytes; 949 950 /*Get Counters from page_1*/ 951 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 952 page1, &value); 953 if (retcode) 954 goto err; 955 956 stats->opackets = value.page_1.egress_all_frames; 957 stats->obytes = value.page_1.egress_all_bytes; 958 959 /*Get Counters from page_2*/ 960 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 961 page2, &value); 962 if (retcode) 963 goto err; 964 965 /* Ingress drop frame count due to configured rules */ 966 stats->ierrors = value.page_2.ingress_filtered_frames; 967 /* Ingress drop frame count due to error */ 968 stats->ierrors += value.page_2.ingress_discarded_frames; 969 970 stats->oerrors = value.page_2.egress_discarded_frames; 971 stats->imissed = value.page_2.ingress_nobuffer_discards; 972 973 return; 974 975 err: 976 RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); 977 return; 978 }; 979 980 static 981 void dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 982 { 983 struct dpaa2_dev_priv *priv = dev->data->dev_private; 984 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 985 int32_t retcode; 986 987 PMD_INIT_FUNC_TRACE(); 988 989 if (dpni == NULL) { 990 RTE_LOG(ERR, PMD, "dpni is NULL"); 991 return; 992 } 993 994 retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 995 if (retcode) 996 goto error; 997 998 return; 999 1000 error: 1001 RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); 1002 return; 1003 }; 1004 1005 /* return 0 means link status changed, -1 means not changed */ 1006 static int 1007 dpaa2_dev_link_update(struct rte_eth_dev *dev, 1008 int wait_to_complete __rte_unused) 1009 { 1010 int ret; 1011 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1012 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1013 struct rte_eth_link link, old; 1014 struct dpni_link_state state = {0}; 1015 1016 PMD_INIT_FUNC_TRACE(); 1017 1018 if (dpni == NULL) { 1019 RTE_LOG(ERR, PMD, "error : dpni is NULL"); 1020 return 0; 1021 } 1022 memset(&old, 0, sizeof(old)); 1023 dpaa2_dev_atomic_read_link_status(dev, &old); 1024 1025 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1026 if (ret < 0) { 1027 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret); 1028 return -1; 1029 } 1030 1031 if ((old.link_status == state.up) && (old.link_speed == state.rate)) { 1032 RTE_LOG(DEBUG, PMD, "No change in status\n"); 1033 return -1; 1034 } 1035 1036 memset(&link, 0, sizeof(struct rte_eth_link)); 1037 link.link_status = state.up; 1038 link.link_speed = state.rate; 1039 1040 if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1041 link.link_duplex = ETH_LINK_HALF_DUPLEX; 1042 else 1043 link.link_duplex = ETH_LINK_FULL_DUPLEX; 1044 1045 dpaa2_dev_atomic_write_link_status(dev, &link); 1046 1047 if (link.link_status) 1048 PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id); 1049 else 1050 PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id); 1051 return 0; 1052 } 1053 1054 /** 1055 * Toggle the DPNI to enable, if not already enabled. 1056 * This is not strictly PHY up/down - it is more of logical toggling. 1057 */ 1058 static int 1059 dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1060 { 1061 int ret = -EINVAL; 1062 struct dpaa2_dev_priv *priv; 1063 struct fsl_mc_io *dpni; 1064 int en = 0; 1065 1066 PMD_INIT_FUNC_TRACE(); 1067 1068 priv = dev->data->dev_private; 1069 dpni = (struct fsl_mc_io *)priv->hw; 1070 1071 if (dpni == NULL) { 1072 RTE_LOG(ERR, PMD, "Device has not yet been configured"); 1073 return ret; 1074 } 1075 1076 /* Check if DPNI is currently enabled */ 1077 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1078 if (ret) { 1079 /* Unable to obtain dpni status; Not continuing */ 1080 PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); 1081 return -EINVAL; 1082 } 1083 1084 /* Enable link if not already enabled */ 1085 if (!en) { 1086 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1087 if (ret) { 1088 PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); 1089 return -EINVAL; 1090 } 1091 } 1092 /* changing tx burst function to start enqueues */ 1093 dev->tx_pkt_burst = dpaa2_dev_tx; 1094 dev->data->dev_link.link_status = 1; 1095 1096 PMD_DRV_LOG(INFO, "Port %d Link UP successful", dev->data->port_id); 1097 return ret; 1098 } 1099 1100 /** 1101 * Toggle the DPNI to disable, if not already disabled. 1102 * This is not strictly PHY up/down - it is more of logical toggling. 1103 */ 1104 static int 1105 dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 1106 { 1107 int ret = -EINVAL; 1108 struct dpaa2_dev_priv *priv; 1109 struct fsl_mc_io *dpni; 1110 int dpni_enabled = 0; 1111 int retries = 10; 1112 1113 PMD_INIT_FUNC_TRACE(); 1114 1115 priv = dev->data->dev_private; 1116 dpni = (struct fsl_mc_io *)priv->hw; 1117 1118 if (dpni == NULL) { 1119 RTE_LOG(ERR, PMD, "Device has not yet been configured"); 1120 return ret; 1121 } 1122 1123 /*changing tx burst function to avoid any more enqueues */ 1124 dev->tx_pkt_burst = dummy_dev_tx; 1125 1126 /* Loop while dpni_disable() attempts to drain the egress FQs 1127 * and confirm them back to us. 1128 */ 1129 do { 1130 ret = dpni_disable(dpni, 0, priv->token); 1131 if (ret) { 1132 PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret); 1133 return ret; 1134 } 1135 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 1136 if (ret) { 1137 PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret); 1138 return ret; 1139 } 1140 if (dpni_enabled) 1141 /* Allow the MC some slack */ 1142 rte_delay_us(100 * 1000); 1143 } while (dpni_enabled && --retries); 1144 1145 if (!retries) { 1146 PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n"); 1147 /* todo- we may have to manually cleanup queues. 1148 */ 1149 } else { 1150 PMD_DRV_LOG(INFO, "Port %d Link DOWN successful", 1151 dev->data->port_id); 1152 } 1153 1154 dev->data->dev_link.link_status = 0; 1155 1156 return ret; 1157 } 1158 1159 static int 1160 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1161 { 1162 int ret = -EINVAL; 1163 struct dpaa2_dev_priv *priv; 1164 struct fsl_mc_io *dpni; 1165 struct dpni_link_state state = {0}; 1166 1167 PMD_INIT_FUNC_TRACE(); 1168 1169 priv = dev->data->dev_private; 1170 dpni = (struct fsl_mc_io *)priv->hw; 1171 1172 if (dpni == NULL || fc_conf == NULL) { 1173 RTE_LOG(ERR, PMD, "device not configured"); 1174 return ret; 1175 } 1176 1177 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1178 if (ret) { 1179 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret); 1180 return ret; 1181 } 1182 1183 memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 1184 if (state.options & DPNI_LINK_OPT_PAUSE) { 1185 /* DPNI_LINK_OPT_PAUSE set 1186 * if ASYM_PAUSE not set, 1187 * RX Side flow control (handle received Pause frame) 1188 * TX side flow control (send Pause frame) 1189 * if ASYM_PAUSE set, 1190 * RX Side flow control (handle received Pause frame) 1191 * No TX side flow control (send Pause frame disabled) 1192 */ 1193 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 1194 fc_conf->mode = RTE_FC_FULL; 1195 else 1196 fc_conf->mode = RTE_FC_RX_PAUSE; 1197 } else { 1198 /* DPNI_LINK_OPT_PAUSE not set 1199 * if ASYM_PAUSE set, 1200 * TX side flow control (send Pause frame) 1201 * No RX side flow control (No action on pause frame rx) 1202 * if ASYM_PAUSE not set, 1203 * Flow control disabled 1204 */ 1205 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 1206 fc_conf->mode = RTE_FC_TX_PAUSE; 1207 else 1208 fc_conf->mode = RTE_FC_NONE; 1209 } 1210 1211 return ret; 1212 } 1213 1214 static int 1215 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1216 { 1217 int ret = -EINVAL; 1218 struct dpaa2_dev_priv *priv; 1219 struct fsl_mc_io *dpni; 1220 struct dpni_link_state state = {0}; 1221 struct dpni_link_cfg cfg = {0}; 1222 1223 PMD_INIT_FUNC_TRACE(); 1224 1225 priv = dev->data->dev_private; 1226 dpni = (struct fsl_mc_io *)priv->hw; 1227 1228 if (dpni == NULL) { 1229 RTE_LOG(ERR, PMD, "dpni is NULL"); 1230 return ret; 1231 } 1232 1233 /* It is necessary to obtain the current state before setting fc_conf 1234 * as MC would return error in case rate, autoneg or duplex values are 1235 * different. 1236 */ 1237 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1238 if (ret) { 1239 RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)", ret); 1240 return -1; 1241 } 1242 1243 /* Disable link before setting configuration */ 1244 dpaa2_dev_set_link_down(dev); 1245 1246 /* Based on fc_conf, update cfg */ 1247 cfg.rate = state.rate; 1248 cfg.options = state.options; 1249 1250 /* update cfg with fc_conf */ 1251 switch (fc_conf->mode) { 1252 case RTE_FC_FULL: 1253 /* Full flow control; 1254 * OPT_PAUSE set, ASYM_PAUSE not set 1255 */ 1256 cfg.options |= DPNI_LINK_OPT_PAUSE; 1257 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1258 case RTE_FC_TX_PAUSE: 1259 /* Enable RX flow control 1260 * OPT_PAUSE not set; 1261 * ASYM_PAUSE set; 1262 */ 1263 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1264 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1265 break; 1266 case RTE_FC_RX_PAUSE: 1267 /* Enable TX Flow control 1268 * OPT_PAUSE set 1269 * ASYM_PAUSE set 1270 */ 1271 cfg.options |= DPNI_LINK_OPT_PAUSE; 1272 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1273 break; 1274 case RTE_FC_NONE: 1275 /* Disable Flow control 1276 * OPT_PAUSE not set 1277 * ASYM_PAUSE not set 1278 */ 1279 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1280 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1281 break; 1282 default: 1283 RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)", 1284 fc_conf->mode); 1285 return -1; 1286 } 1287 1288 ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 1289 if (ret) 1290 RTE_LOG(ERR, PMD, "Unable to set Link configuration (err=%d)", 1291 ret); 1292 1293 /* Enable link */ 1294 dpaa2_dev_set_link_up(dev); 1295 1296 return ret; 1297 } 1298 1299 static struct eth_dev_ops dpaa2_ethdev_ops = { 1300 .dev_configure = dpaa2_eth_dev_configure, 1301 .dev_start = dpaa2_dev_start, 1302 .dev_stop = dpaa2_dev_stop, 1303 .dev_close = dpaa2_dev_close, 1304 .promiscuous_enable = dpaa2_dev_promiscuous_enable, 1305 .promiscuous_disable = dpaa2_dev_promiscuous_disable, 1306 .allmulticast_enable = dpaa2_dev_allmulticast_enable, 1307 .allmulticast_disable = dpaa2_dev_allmulticast_disable, 1308 .dev_set_link_up = dpaa2_dev_set_link_up, 1309 .dev_set_link_down = dpaa2_dev_set_link_down, 1310 .link_update = dpaa2_dev_link_update, 1311 .stats_get = dpaa2_dev_stats_get, 1312 .stats_reset = dpaa2_dev_stats_reset, 1313 .fw_version_get = dpaa2_fw_version_get, 1314 .dev_infos_get = dpaa2_dev_info_get, 1315 .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 1316 .mtu_set = dpaa2_dev_mtu_set, 1317 .vlan_filter_set = dpaa2_vlan_filter_set, 1318 .vlan_offload_set = dpaa2_vlan_offload_set, 1319 .rx_queue_setup = dpaa2_dev_rx_queue_setup, 1320 .rx_queue_release = dpaa2_dev_rx_queue_release, 1321 .tx_queue_setup = dpaa2_dev_tx_queue_setup, 1322 .tx_queue_release = dpaa2_dev_tx_queue_release, 1323 .flow_ctrl_get = dpaa2_flow_ctrl_get, 1324 .flow_ctrl_set = dpaa2_flow_ctrl_set, 1325 .mac_addr_add = dpaa2_dev_add_mac_addr, 1326 .mac_addr_remove = dpaa2_dev_remove_mac_addr, 1327 .mac_addr_set = dpaa2_dev_set_mac_addr, 1328 }; 1329 1330 static int 1331 dpaa2_dev_init(struct rte_eth_dev *eth_dev) 1332 { 1333 struct rte_device *dev = eth_dev->device; 1334 struct rte_dpaa2_device *dpaa2_dev; 1335 struct fsl_mc_io *dpni_dev; 1336 struct dpni_attr attr; 1337 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1338 struct dpni_buffer_layout layout; 1339 int ret, hw_id; 1340 1341 PMD_INIT_FUNC_TRACE(); 1342 1343 /* For secondary processes, the primary has done all the work */ 1344 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1345 return 0; 1346 1347 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 1348 1349 hw_id = dpaa2_dev->object_id; 1350 1351 dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 1352 if (!dpni_dev) { 1353 PMD_INIT_LOG(ERR, "malloc failed for dpni device\n"); 1354 return -1; 1355 } 1356 1357 dpni_dev->regs = rte_mcp_ptr_list[0]; 1358 ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 1359 if (ret) { 1360 PMD_INIT_LOG(ERR, 1361 "Failure in opening dpni@%d with err code %d\n", 1362 hw_id, ret); 1363 rte_free(dpni_dev); 1364 return -1; 1365 } 1366 1367 /* Clean the device first */ 1368 ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 1369 if (ret) { 1370 PMD_INIT_LOG(ERR, 1371 "Failure cleaning dpni@%d with err code %d\n", 1372 hw_id, ret); 1373 goto init_err; 1374 } 1375 1376 ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 1377 if (ret) { 1378 PMD_INIT_LOG(ERR, 1379 "Failure in get dpni@%d attribute, err code %d\n", 1380 hw_id, ret); 1381 goto init_err; 1382 } 1383 1384 priv->num_tc = attr.num_tcs; 1385 1386 /* Resetting the "num_rx_vqueues" to equal number of queues in first TC 1387 * as only one TC is supported on Rx Side. Once Multiple TCs will be 1388 * in use for Rx processing then this will be changed or removed. 1389 */ 1390 priv->nb_rx_queues = attr.num_queues; 1391 1392 /* TODO:Using hard coded value for number of TX queues due to dependency 1393 * in MC. 1394 */ 1395 priv->nb_tx_queues = 8; 1396 1397 PMD_INIT_LOG(DEBUG, "num TC - RX %d", priv->num_tc); 1398 PMD_INIT_LOG(DEBUG, "nb_tx_queues %d", priv->nb_tx_queues); 1399 PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues); 1400 1401 priv->hw = dpni_dev; 1402 priv->hw_id = hw_id; 1403 priv->options = attr.options; 1404 priv->max_mac_filters = attr.mac_filter_entries; 1405 priv->max_vlan_filters = attr.vlan_filter_entries; 1406 priv->flags = 0; 1407 1408 /* Allocate memory for hardware structure for queues */ 1409 ret = dpaa2_alloc_rx_tx_queues(eth_dev); 1410 if (ret) { 1411 PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n"); 1412 goto init_err; 1413 } 1414 1415 /* Allocate memory for storing MAC addresses */ 1416 eth_dev->data->mac_addrs = rte_zmalloc("dpni", 1417 ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 1418 if (eth_dev->data->mac_addrs == NULL) { 1419 PMD_INIT_LOG(ERR, 1420 "Failed to allocate %d bytes needed to store MAC addresses", 1421 ETHER_ADDR_LEN * attr.mac_filter_entries); 1422 ret = -ENOMEM; 1423 goto init_err; 1424 } 1425 1426 ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 1427 priv->token, 1428 (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); 1429 if (ret) { 1430 PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n", 1431 ret); 1432 goto init_err; 1433 } 1434 1435 /* ... tx buffer layout ... */ 1436 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 1437 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 1438 layout.pass_frame_status = 1; 1439 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 1440 DPNI_QUEUE_TX, &layout); 1441 if (ret) { 1442 PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout", 1443 ret); 1444 goto init_err; 1445 } 1446 1447 /* ... tx-conf and error buffer layout ... */ 1448 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 1449 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 1450 layout.pass_frame_status = 1; 1451 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 1452 DPNI_QUEUE_TX_CONFIRM, &layout); 1453 if (ret) { 1454 PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout", 1455 ret); 1456 goto init_err; 1457 } 1458 1459 eth_dev->dev_ops = &dpaa2_ethdev_ops; 1460 1461 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 1462 eth_dev->tx_pkt_burst = dpaa2_dev_tx; 1463 rte_fslmc_vfio_dmamap(); 1464 1465 return 0; 1466 init_err: 1467 dpaa2_dev_uninit(eth_dev); 1468 return ret; 1469 } 1470 1471 static int 1472 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) 1473 { 1474 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1475 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1476 int i, ret; 1477 struct dpaa2_queue *dpaa2_q; 1478 1479 PMD_INIT_FUNC_TRACE(); 1480 1481 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1482 return -EPERM; 1483 1484 if (!dpni) { 1485 PMD_INIT_LOG(WARNING, "Already closed or not started"); 1486 return -1; 1487 } 1488 1489 dpaa2_dev_close(eth_dev); 1490 1491 if (priv->rx_vq[0]) { 1492 /* cleaning up queue storage */ 1493 for (i = 0; i < priv->nb_rx_queues; i++) { 1494 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1495 if (dpaa2_q->q_storage) 1496 rte_free(dpaa2_q->q_storage); 1497 } 1498 /*free the all queue memory */ 1499 rte_free(priv->rx_vq[0]); 1500 priv->rx_vq[0] = NULL; 1501 } 1502 1503 /* free memory for storing MAC addresses */ 1504 if (eth_dev->data->mac_addrs) { 1505 rte_free(eth_dev->data->mac_addrs); 1506 eth_dev->data->mac_addrs = NULL; 1507 } 1508 1509 /* Close the device at underlying layer*/ 1510 ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 1511 if (ret) { 1512 PMD_INIT_LOG(ERR, 1513 "Failure closing dpni device with err code %d\n", 1514 ret); 1515 } 1516 1517 /* Free the allocated memory for ethernet private data and dpni*/ 1518 priv->hw = NULL; 1519 rte_free(dpni); 1520 1521 eth_dev->dev_ops = NULL; 1522 eth_dev->rx_pkt_burst = NULL; 1523 eth_dev->tx_pkt_burst = NULL; 1524 1525 return 0; 1526 } 1527 1528 static int 1529 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 1530 struct rte_dpaa2_device *dpaa2_dev) 1531 { 1532 struct rte_eth_dev *eth_dev; 1533 char ethdev_name[RTE_ETH_NAME_MAX_LEN]; 1534 1535 int diag; 1536 1537 sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id); 1538 1539 eth_dev = rte_eth_dev_allocate(ethdev_name); 1540 if (eth_dev == NULL) 1541 return -ENOMEM; 1542 1543 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1544 eth_dev->data->dev_private = rte_zmalloc( 1545 "ethdev private structure", 1546 sizeof(struct dpaa2_dev_priv), 1547 RTE_CACHE_LINE_SIZE); 1548 if (eth_dev->data->dev_private == NULL) { 1549 PMD_INIT_LOG(CRIT, "Cannot allocate memzone for" 1550 " private port data\n"); 1551 rte_eth_dev_release_port(eth_dev); 1552 return -ENOMEM; 1553 } 1554 } 1555 eth_dev->device = &dpaa2_dev->device; 1556 eth_dev->device->driver = &dpaa2_drv->driver; 1557 1558 dpaa2_dev->eth_dev = eth_dev; 1559 eth_dev->data->rx_mbuf_alloc_failed = 0; 1560 1561 /* Invoke PMD device initialization function */ 1562 diag = dpaa2_dev_init(eth_dev); 1563 if (diag == 0) 1564 return 0; 1565 1566 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1567 rte_free(eth_dev->data->dev_private); 1568 rte_eth_dev_release_port(eth_dev); 1569 return diag; 1570 } 1571 1572 static int 1573 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 1574 { 1575 struct rte_eth_dev *eth_dev; 1576 1577 eth_dev = dpaa2_dev->eth_dev; 1578 dpaa2_dev_uninit(eth_dev); 1579 1580 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1581 rte_free(eth_dev->data->dev_private); 1582 rte_eth_dev_release_port(eth_dev); 1583 1584 return 0; 1585 } 1586 1587 static struct rte_dpaa2_driver rte_dpaa2_pmd = { 1588 .drv_type = DPAA2_MC_DPNI_DEVID, 1589 .probe = rte_dpaa2_probe, 1590 .remove = rte_dpaa2_remove, 1591 }; 1592 1593 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); 1594