/* * SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int rx_ip_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (eth_conf->rxmode.jumbo_frame == 1) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution."
				      " Check queue config");
			return ret;
		}
	}

	if (eth_conf->rxmode.hw_ip_checksum)
		rx_ip_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting RX L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting RX L4 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		DPAA2_PMD_ERR("Error setting TX L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		DPAA2_PMD_ERR("Error setting TX L4 csum: err = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (eth_conf->rxmode.hw_vlan_filter)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14)
		 * (in following order ->DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}
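
/*
 * dpaa2_dev_start() below enables the DPNI object, brings the link up,
 * resolves the FQIDs of the configured Rx queues, programs the checksum
 * error behaviour and, when LSC interrupts are requested, registers the
 * DPNI link-change interrupt handler.
 */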
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static void
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);
}
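
/* Basic statistics are read from three DPNI statistics pages: page 0 holds
 * the ingress frame/byte totals, page 1 the egress totals, and page 2 the
 * filter/discard counters used below for ierrors, oerrors and imissed.
 */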
static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return retcode;
};

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return;
};

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return 0;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		DPAA2_PMD_DEBUG("No change in status");
	else
		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
			       link.link_status ? "Up" : "Down");

	return ret;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_ERR("Unable to get link state (%d)", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
	else
		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("Device has not yet been configured");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
		/* todo- we may have to manually cleanup queues.
		 */
	} else {
		DPAA2_PMD_INFO("Port %d Link DOWN successful",
			       dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		DPAA2_PMD_ERR("device not configured");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* TX side flow control only (send Pause frames)
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* RX side flow control only (handle received Pause frames)
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
			      fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
			      ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			DPAA2_PMD_ERR("Unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}
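
/* Event queue attach/detach: when the eventdev Rx adapter is in use, the Rx
 * queue destination is re-targeted to a DPCON object so that received frames
 * are delivered through the event device (parallel or atomic scheduling)
 * instead of being dequeued directly by the PMD.
 */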
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
		cfg.destination.hold_active = 1;
	}

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.rx_queue_count = dpaa2_dev_rx_queue_count,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure in opening dpni@%d with err code %d",
			hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
			      hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure in get dpni@%d attribute, err code %d",
			hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
			priv->num_rx_tc, priv->nb_rx_queues,
			priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		DPAA2_PMD_ERR("Queue allocation Failed");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA2_PMD_ERR(
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d",
			      ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
			      ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		DPAA2_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free all queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure closing dpni device with err code %d",
			ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			DPAA2_PMD_CRIT(
				"Unable to allocate memory for private data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);

RTE_INIT(dpaa2_pmd_init_log);
static void
dpaa2_pmd_init_log(void)
{
	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
	if (dpaa2_logtype_pmd >= 0)
		rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
}