/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
			    ret, vlan_id, priv->hw_id);

	return ret;
}

static void
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
				ret);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			RTE_LOG(INFO, PMD,
				"VLAN extend offload not supported\n");
	}
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int rx_ip_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (eth_conf->rxmode.jumbo_frame == 1) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpaa2_dev_mtu_set(dev,
					eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				PMD_INIT_LOG(ERR,
					     "unable to set mtu. check config\n");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow distribution. "
				     "please check queue config\n");
			return ret;
		}
	}

	if (eth_conf->rxmode.hw_ip_checksum)
		rx_ip_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l4 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l4 csum:Error = %d\n", ret);
		return ret;
	}

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		    dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");

	if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14) to enable
		 * 1 line data, 1 line annotation
		 */
		cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
			    rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in setting the rx flow"
				     " err : = %d\n", ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff)
		return 0;

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id=%d, flow =%d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			PMD_INIT_LOG(ERR,
			   "Error in setting tx congestion notification: = %d",
			   -ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo - add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL, NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
			     strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
			     strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error to get flow "
				     "information Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
			     "code = %d\n", ret);
		return ret;
	}
	/* VLAN Offload Settings */
	if (priv->max_vlan_filters)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		/* free the CSCN memory only if it was allocated */
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD,
				"Unable to disable M promisc mode %d\n",
				ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Adding the MAC ADDR failed: err = %d\n", ret);
	return ret;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Removing the MAC ADDR failed: err = %d\n", ret);
}

static void
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		RTE_LOG(ERR, PMD,
			"error: Setting the MAC ADDR failed %d\n", ret);
}

static int
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return -EINVAL;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL\n");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return 0;

err:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return retcode;
}

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
	return;
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
	return 0;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "DPNI is NULL\n");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
			    dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
		/* todo - we may have to manually cleanup queues.
		 */
	} else {
		PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
			    dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		RTE_LOG(ERR, PMD, "device not configured\n");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
			fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD,
			"Unable to set Link configuration (err=%d)\n",
			ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
		    priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free the all queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	RTE_LOG(INFO, PMD, "%s: netdev deleted\n", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);