1 /* * SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_ethdev.h> 13 #include <rte_malloc.h> 14 #include <rte_memcpy.h> 15 #include <rte_string_fns.h> 16 #include <rte_cycles.h> 17 #include <rte_kvargs.h> 18 #include <rte_dev.h> 19 #include <rte_fslmc.h> 20 21 #include <fslmc_logs.h> 22 #include <fslmc_vfio.h> 23 #include <dpaa2_hw_pvt.h> 24 #include <dpaa2_hw_mempool.h> 25 #include <dpaa2_hw_dpio.h> 26 #include <mc/fsl_dpmng.h> 27 #include "dpaa2_ethdev.h" 28 #include <fsl_qbman_debug.h> 29 30 struct rte_dpaa2_xstats_name_off { 31 char name[RTE_ETH_XSTATS_NAME_SIZE]; 32 uint8_t page_id; /* dpni statistics page id */ 33 uint8_t stats_id; /* stats id in the given page */ 34 }; 35 36 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { 37 {"ingress_multicast_frames", 0, 2}, 38 {"ingress_multicast_bytes", 0, 3}, 39 {"ingress_broadcast_frames", 0, 4}, 40 {"ingress_broadcast_bytes", 0, 5}, 41 {"egress_multicast_frames", 1, 2}, 42 {"egress_multicast_bytes", 1, 3}, 43 {"egress_broadcast_frames", 1, 4}, 44 {"egress_broadcast_bytes", 1, 5}, 45 {"ingress_filtered_frames", 2, 0}, 46 {"ingress_discarded_frames", 2, 1}, 47 {"ingress_nobuffer_discards", 2, 2}, 48 {"egress_discarded_frames", 2, 3}, 49 {"egress_confirmed_frames", 2, 4}, 50 }; 51 52 static struct rte_dpaa2_driver rte_dpaa2_pmd; 53 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); 54 static int dpaa2_dev_link_update(struct rte_eth_dev *dev, 55 int wait_to_complete); 56 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); 57 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); 58 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 59 60 /** 61 * Atomically reads the link status information from global 62 * structure rte_eth_dev. 63 * 64 * @param dev 65 * - Pointer to the structure rte_eth_dev to read from. 66 * - Pointer to the buffer to be saved with the link status. 67 * 68 * @return 69 * - On success, zero. 70 * - On failure, negative value. 71 */ 72 static inline int 73 dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev, 74 struct rte_eth_link *link) 75 { 76 struct rte_eth_link *dst = link; 77 struct rte_eth_link *src = &dev->data->dev_link; 78 79 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 80 *(uint64_t *)src) == 0) 81 return -1; 82 83 return 0; 84 } 85 86 /** 87 * Atomically writes the link status information into global 88 * structure rte_eth_dev. 89 * 90 * @param dev 91 * - Pointer to the structure rte_eth_dev to read from. 92 * - Pointer to the buffer to be saved with the link status. 93 * 94 * @return 95 * - On success, zero. 96 * - On failure, negative value. 
97 */ 98 static inline int 99 dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev, 100 struct rte_eth_link *link) 101 { 102 struct rte_eth_link *dst = &dev->data->dev_link; 103 struct rte_eth_link *src = link; 104 105 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 106 *(uint64_t *)src) == 0) 107 return -1; 108 109 return 0; 110 } 111 112 static int 113 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 114 { 115 int ret; 116 struct dpaa2_dev_priv *priv = dev->data->dev_private; 117 struct fsl_mc_io *dpni = priv->hw; 118 119 PMD_INIT_FUNC_TRACE(); 120 121 if (dpni == NULL) { 122 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 123 return -1; 124 } 125 126 if (on) 127 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, 128 priv->token, vlan_id); 129 else 130 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, 131 priv->token, vlan_id); 132 133 if (ret < 0) 134 PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d", 135 ret, vlan_id, priv->hw_id); 136 137 return ret; 138 } 139 140 static int 141 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) 142 { 143 struct dpaa2_dev_priv *priv = dev->data->dev_private; 144 struct fsl_mc_io *dpni = priv->hw; 145 int ret; 146 147 PMD_INIT_FUNC_TRACE(); 148 149 if (mask & ETH_VLAN_FILTER_MASK) { 150 if (dev->data->dev_conf.rxmode.hw_vlan_filter) 151 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 152 priv->token, true); 153 else 154 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 155 priv->token, false); 156 if (ret < 0) 157 RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n", 158 ret); 159 } 160 161 if (mask & ETH_VLAN_EXTEND_MASK) { 162 if (dev->data->dev_conf.rxmode.hw_vlan_extend) 163 RTE_LOG(INFO, PMD, 164 "VLAN extend offload not supported\n"); 165 } 166 167 return 0; 168 } 169 170 static int 171 dpaa2_fw_version_get(struct rte_eth_dev *dev, 172 char *fw_version, 173 size_t fw_size) 174 { 175 int ret; 176 struct dpaa2_dev_priv *priv = dev->data->dev_private; 177 struct fsl_mc_io *dpni = priv->hw; 178 struct mc_soc_version mc_plat_info = {0}; 179 struct mc_version mc_ver_info = {0}; 180 181 PMD_INIT_FUNC_TRACE(); 182 183 if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) 184 RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n"); 185 186 if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) 187 RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n"); 188 189 ret = snprintf(fw_version, fw_size, 190 "%x-%d.%d.%d", 191 mc_plat_info.svr, 192 mc_ver_info.major, 193 mc_ver_info.minor, 194 mc_ver_info.revision); 195 196 ret += 1; /* add the size of '\0' */ 197 if (fw_size < (uint32_t)ret) 198 return ret; 199 else 200 return 0; 201 } 202 203 static void 204 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 205 { 206 struct dpaa2_dev_priv *priv = dev->data->dev_private; 207 208 PMD_INIT_FUNC_TRACE(); 209 210 dev_info->if_index = priv->hw_id; 211 212 dev_info->max_mac_addrs = priv->max_mac_filters; 213 dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; 214 dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; 215 dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; 216 dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; 217 dev_info->rx_offload_capa = 218 DEV_RX_OFFLOAD_IPV4_CKSUM | 219 DEV_RX_OFFLOAD_UDP_CKSUM | 220 DEV_RX_OFFLOAD_TCP_CKSUM | 221 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; 222 dev_info->tx_offload_capa = 223 DEV_TX_OFFLOAD_IPV4_CKSUM | 224 DEV_TX_OFFLOAD_UDP_CKSUM | 225 DEV_TX_OFFLOAD_TCP_CKSUM | 226 DEV_TX_OFFLOAD_SCTP_CKSUM | 227 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 228 dev_info->speed_capa = 
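/* Illustrative usage sketch (editor's note, not driver code): an application
 * reads the MC firmware string produced by dpaa2_fw_version_get() above
 * through the generic ethdev call. When the supplied buffer is too small the
 * callback returns the required size (including the '\0'), so the caller can
 * retry with a larger buffer. "port_id" is assumed to be a valid dpaa2 port.
 *
 *	char fw[64];
 *	int rc = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *	if (rc == 0)
 *		printf("MC firmware: %s\n", fw);
 *	else if (rc > 0)
 *		printf("need %d bytes for the version string\n", rc);
 */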
ETH_LINK_SPEED_1G | 229 ETH_LINK_SPEED_2_5G | 230 ETH_LINK_SPEED_10G; 231 } 232 233 static int 234 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) 235 { 236 struct dpaa2_dev_priv *priv = dev->data->dev_private; 237 uint16_t dist_idx; 238 uint32_t vq_id; 239 struct dpaa2_queue *mc_q, *mcq; 240 uint32_t tot_queues; 241 int i; 242 struct dpaa2_queue *dpaa2_q; 243 244 PMD_INIT_FUNC_TRACE(); 245 246 tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; 247 mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, 248 RTE_CACHE_LINE_SIZE); 249 if (!mc_q) { 250 PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n"); 251 return -1; 252 } 253 254 for (i = 0; i < priv->nb_rx_queues; i++) { 255 mc_q->dev = dev; 256 priv->rx_vq[i] = mc_q++; 257 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 258 dpaa2_q->q_storage = rte_malloc("dq_storage", 259 sizeof(struct queue_storage_info_t), 260 RTE_CACHE_LINE_SIZE); 261 if (!dpaa2_q->q_storage) 262 goto fail; 263 264 memset(dpaa2_q->q_storage, 0, 265 sizeof(struct queue_storage_info_t)); 266 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) 267 goto fail; 268 } 269 270 for (i = 0; i < priv->nb_tx_queues; i++) { 271 mc_q->dev = dev; 272 mc_q->flow_id = 0xffff; 273 priv->tx_vq[i] = mc_q++; 274 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 275 dpaa2_q->cscn = rte_malloc(NULL, 276 sizeof(struct qbman_result), 16); 277 if (!dpaa2_q->cscn) 278 goto fail_tx; 279 } 280 281 vq_id = 0; 282 for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { 283 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; 284 mcq->tc_index = DPAA2_DEF_TC; 285 mcq->flow_id = dist_idx; 286 vq_id++; 287 } 288 289 return 0; 290 fail_tx: 291 i -= 1; 292 while (i >= 0) { 293 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 294 rte_free(dpaa2_q->cscn); 295 priv->tx_vq[i--] = NULL; 296 } 297 i = priv->nb_rx_queues; 298 fail: 299 i -= 1; 300 mc_q = priv->rx_vq[0]; 301 while (i >= 0) { 302 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 303 dpaa2_free_dq_storage(dpaa2_q->q_storage); 304 rte_free(dpaa2_q->q_storage); 305 priv->rx_vq[i--] = NULL; 306 } 307 rte_free(mc_q); 308 return -1; 309 } 310 311 static int 312 dpaa2_eth_dev_configure(struct rte_eth_dev *dev) 313 { 314 struct dpaa2_dev_priv *priv = dev->data->dev_private; 315 struct fsl_mc_io *dpni = priv->hw; 316 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 317 int rx_ip_csum_offload = false; 318 int ret; 319 320 PMD_INIT_FUNC_TRACE(); 321 322 if (eth_conf->rxmode.jumbo_frame == 1) { 323 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { 324 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, 325 priv->token, eth_conf->rxmode.max_rx_pkt_len); 326 if (ret) { 327 PMD_INIT_LOG(ERR, 328 "unable to set mtu. check config\n"); 329 return ret; 330 } 331 } else { 332 return -1; 333 } 334 } 335 336 if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { 337 ret = dpaa2_setup_flow_dist(dev, 338 eth_conf->rx_adv_conf.rss_conf.rss_hf); 339 if (ret) { 340 PMD_INIT_LOG(ERR, "unable to set flow distribution." 
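/* Illustrative usage sketch (editor's note, not driver code): the checks in
 * dpaa2_eth_dev_configure() above correspond to an application configuration
 * along these lines; port_id, the queue counts and the mbuf pool are assumed
 * to exist, and rss_hf must carry fields dpaa2_setup_flow_dist() supports.
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.jumbo_frame = 1,
 *			.max_rx_pkt_len = 9000,	   must stay <= DPAA2_MAX_RX_PKT_LEN
 *			.hw_ip_checksum = 1,	   enables RX L3/L4 csum offload
 *		},
 *		.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *	};
 *
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */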
341 "please check queue config\n"); 342 return ret; 343 } 344 } 345 346 if (eth_conf->rxmode.hw_ip_checksum) 347 rx_ip_csum_offload = true; 348 349 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 350 DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload); 351 if (ret) { 352 PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret); 353 return ret; 354 } 355 356 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 357 DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload); 358 if (ret) { 359 PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret); 360 return ret; 361 } 362 363 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 364 DPNI_OFF_TX_L3_CSUM, true); 365 if (ret) { 366 PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret); 367 return ret; 368 } 369 370 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 371 DPNI_OFF_TX_L4_CSUM, true); 372 if (ret) { 373 PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret); 374 return ret; 375 } 376 377 /* update the current status */ 378 dpaa2_dev_link_update(dev, 0); 379 380 return 0; 381 } 382 383 /* Function to setup RX flow information. It contains traffic class ID, 384 * flow ID, destination configuration etc. 385 */ 386 static int 387 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, 388 uint16_t rx_queue_id, 389 uint16_t nb_rx_desc __rte_unused, 390 unsigned int socket_id __rte_unused, 391 const struct rte_eth_rxconf *rx_conf __rte_unused, 392 struct rte_mempool *mb_pool) 393 { 394 struct dpaa2_dev_priv *priv = dev->data->dev_private; 395 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 396 struct dpaa2_queue *dpaa2_q; 397 struct dpni_queue cfg; 398 uint8_t options = 0; 399 uint8_t flow_id; 400 uint32_t bpid; 401 int ret; 402 403 PMD_INIT_FUNC_TRACE(); 404 405 PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p", 406 dev, rx_queue_id, mb_pool, rx_conf); 407 408 if (!priv->bp_list || priv->bp_list->mp != mb_pool) { 409 bpid = mempool_to_bpid(mb_pool); 410 ret = dpaa2_attach_bp_list(priv, 411 rte_dpaa2_bpid_info[bpid].bp_list); 412 if (ret) 413 return ret; 414 } 415 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 416 dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ 417 418 /*Get the flow id from given VQ id*/ 419 flow_id = rx_queue_id % priv->nb_rx_queues; 420 memset(&cfg, 0, sizeof(struct dpni_queue)); 421 422 options = options | DPNI_QUEUE_OPT_USER_CTX; 423 cfg.user_context = (uint64_t)(dpaa2_q); 424 425 /*if ls2088 or rev2 device, enable the stashing */ 426 427 if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { 428 options |= DPNI_QUEUE_OPT_FLC; 429 cfg.flc.stash_control = true; 430 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; 431 /* 00 00 00 - last 6 bit represent annotation, context stashing, 432 * data stashing setting 01 01 00 (0x14) 433 * (in following order ->DS AS CS) 434 * to enable 1 line data, 1 line annotation. 
435 * For LX2, this setting should be 01 00 00 (0x10) 436 */ 437 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) 438 cfg.flc.value |= 0x10; 439 else 440 cfg.flc.value |= 0x14; 441 } 442 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, 443 dpaa2_q->tc_index, flow_id, options, &cfg); 444 if (ret) { 445 PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret); 446 return -1; 447 } 448 449 if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { 450 struct dpni_taildrop taildrop; 451 452 taildrop.enable = 1; 453 /*enabling per rx queue congestion control */ 454 taildrop.threshold = CONG_THRESHOLD_RX_Q; 455 taildrop.units = DPNI_CONGESTION_UNIT_BYTES; 456 taildrop.oal = CONG_RX_OAL; 457 PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d", 458 rx_queue_id); 459 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 460 DPNI_CP_QUEUE, DPNI_QUEUE_RX, 461 dpaa2_q->tc_index, flow_id, &taildrop); 462 if (ret) { 463 PMD_INIT_LOG(ERR, "Error in setting the rx flow" 464 " err : = %d\n", ret); 465 return -1; 466 } 467 } 468 469 dev->data->rx_queues[rx_queue_id] = dpaa2_q; 470 return 0; 471 } 472 473 static int 474 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 475 uint16_t tx_queue_id, 476 uint16_t nb_tx_desc __rte_unused, 477 unsigned int socket_id __rte_unused, 478 const struct rte_eth_txconf *tx_conf __rte_unused) 479 { 480 struct dpaa2_dev_priv *priv = dev->data->dev_private; 481 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 482 priv->tx_vq[tx_queue_id]; 483 struct fsl_mc_io *dpni = priv->hw; 484 struct dpni_queue tx_conf_cfg; 485 struct dpni_queue tx_flow_cfg; 486 uint8_t options = 0, flow_id; 487 uint32_t tc_id; 488 int ret; 489 490 PMD_INIT_FUNC_TRACE(); 491 492 /* Return if queue already configured */ 493 if (dpaa2_q->flow_id != 0xffff) { 494 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 495 return 0; 496 } 497 498 memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 499 memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 500 501 tc_id = tx_queue_id; 502 flow_id = 0; 503 504 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, 505 tc_id, flow_id, options, &tx_flow_cfg); 506 if (ret) { 507 PMD_INIT_LOG(ERR, "Error in setting the tx flow: " 508 "tc_id=%d, flow =%d ErrorCode = %x\n", 509 tc_id, flow_id, -ret); 510 return -1; 511 } 512 513 dpaa2_q->flow_id = flow_id; 514 515 if (tx_queue_id == 0) { 516 /*Set tx-conf and error configuration*/ 517 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 518 priv->token, 519 DPNI_CONF_DISABLE); 520 if (ret) { 521 PMD_INIT_LOG(ERR, "Error in set tx conf mode settings" 522 " ErrorCode = %x", ret); 523 return -1; 524 } 525 } 526 dpaa2_q->tc_index = tc_id; 527 528 if (!(priv->flags & DPAA2_TX_CGR_OFF)) { 529 struct dpni_congestion_notification_cfg cong_notif_cfg; 530 531 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; 532 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; 533 /* Notify that the queue is not congested when the data in 534 * the queue is below this thershold. 
535 */ 536 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; 537 cong_notif_cfg.message_ctx = 0; 538 cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn; 539 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; 540 cong_notif_cfg.notification_mode = 541 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | 542 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | 543 DPNI_CONG_OPT_COHERENT_WRITE; 544 545 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, 546 priv->token, 547 DPNI_QUEUE_TX, 548 tc_id, 549 &cong_notif_cfg); 550 if (ret) { 551 PMD_INIT_LOG(ERR, 552 "Error in setting tx congestion notification: = %d", 553 -ret); 554 return -ret; 555 } 556 } 557 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 558 return 0; 559 } 560 561 static void 562 dpaa2_dev_rx_queue_release(void *q __rte_unused) 563 { 564 PMD_INIT_FUNC_TRACE(); 565 } 566 567 static void 568 dpaa2_dev_tx_queue_release(void *q __rte_unused) 569 { 570 PMD_INIT_FUNC_TRACE(); 571 } 572 573 static uint32_t 574 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 575 { 576 int32_t ret; 577 struct dpaa2_dev_priv *priv = dev->data->dev_private; 578 struct dpaa2_queue *dpaa2_q; 579 struct qbman_swp *swp; 580 struct qbman_fq_query_np_rslt state; 581 uint32_t frame_cnt = 0; 582 583 PMD_INIT_FUNC_TRACE(); 584 585 if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 586 ret = dpaa2_affine_qbman_swp(); 587 if (ret) { 588 RTE_LOG(ERR, PMD, "Failure in affining portal\n"); 589 return -EINVAL; 590 } 591 } 592 swp = DPAA2_PER_LCORE_PORTAL; 593 594 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 595 596 if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { 597 frame_cnt = qbman_fq_state_frame_count(&state); 598 RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n", 599 rx_queue_id, frame_cnt); 600 } 601 return frame_cnt; 602 } 603 604 static const uint32_t * 605 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) 606 { 607 static const uint32_t ptypes[] = { 608 /*todo -= add more types */ 609 RTE_PTYPE_L2_ETHER, 610 RTE_PTYPE_L3_IPV4, 611 RTE_PTYPE_L3_IPV4_EXT, 612 RTE_PTYPE_L3_IPV6, 613 RTE_PTYPE_L3_IPV6_EXT, 614 RTE_PTYPE_L4_TCP, 615 RTE_PTYPE_L4_UDP, 616 RTE_PTYPE_L4_SCTP, 617 RTE_PTYPE_L4_ICMP, 618 RTE_PTYPE_UNKNOWN 619 }; 620 621 if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx) 622 return ptypes; 623 return NULL; 624 } 625 626 /** 627 * Dpaa2 link Interrupt handler 628 * 629 * @param param 630 * The address of parameter (struct rte_eth_dev *) regsitered before. 
631 * 632 * @return 633 * void 634 */ 635 static void 636 dpaa2_interrupt_handler(void *param) 637 { 638 struct rte_eth_dev *dev = param; 639 struct dpaa2_dev_priv *priv = dev->data->dev_private; 640 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 641 int ret; 642 int irq_index = DPNI_IRQ_INDEX; 643 unsigned int status = 0, clear = 0; 644 645 PMD_INIT_FUNC_TRACE(); 646 647 if (dpni == NULL) { 648 RTE_LOG(ERR, PMD, "dpni is NULL"); 649 return; 650 } 651 652 ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 653 irq_index, &status); 654 if (unlikely(ret)) { 655 RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret); 656 clear = 0xffffffff; 657 goto out; 658 } 659 660 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 661 clear = DPNI_IRQ_EVENT_LINK_CHANGED; 662 dpaa2_dev_link_update(dev, 0); 663 /* calling all the apps registered for link status event */ 664 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 665 NULL, NULL); 666 } 667 out: 668 ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 669 irq_index, clear); 670 if (unlikely(ret)) 671 RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret); 672 } 673 674 static int 675 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 676 { 677 int err = 0; 678 struct dpaa2_dev_priv *priv = dev->data->dev_private; 679 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 680 int irq_index = DPNI_IRQ_INDEX; 681 unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 682 683 PMD_INIT_FUNC_TRACE(); 684 685 err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 686 irq_index, mask); 687 if (err < 0) { 688 PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err, 689 strerror(-err)); 690 return err; 691 } 692 693 err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 694 irq_index, enable); 695 if (err < 0) 696 PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err, 697 strerror(-err)); 698 699 return err; 700 } 701 702 static int 703 dpaa2_dev_start(struct rte_eth_dev *dev) 704 { 705 struct rte_device *rdev = dev->device; 706 struct rte_dpaa2_device *dpaa2_dev; 707 struct rte_eth_dev_data *data = dev->data; 708 struct dpaa2_dev_priv *priv = data->dev_private; 709 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 710 struct dpni_queue cfg; 711 struct dpni_error_cfg err_cfg; 712 uint16_t qdid; 713 struct dpni_queue_id qid; 714 struct dpaa2_queue *dpaa2_q; 715 int ret, i; 716 struct rte_intr_handle *intr_handle; 717 718 dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 719 intr_handle = &dpaa2_dev->intr_handle; 720 721 PMD_INIT_FUNC_TRACE(); 722 723 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 724 if (ret) { 725 PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n", 726 ret, priv->hw_id); 727 return ret; 728 } 729 730 /* Power up the phy. 
Needed to make the link go UP */ 731 dpaa2_dev_set_link_up(dev); 732 733 ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, 734 DPNI_QUEUE_TX, &qdid); 735 if (ret) { 736 PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret); 737 return ret; 738 } 739 priv->qdid = qdid; 740 741 for (i = 0; i < data->nb_rx_queues; i++) { 742 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; 743 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 744 DPNI_QUEUE_RX, dpaa2_q->tc_index, 745 dpaa2_q->flow_id, &cfg, &qid); 746 if (ret) { 747 PMD_INIT_LOG(ERR, "Error to get flow " 748 "information Error code = %d\n", ret); 749 return ret; 750 } 751 dpaa2_q->fqid = qid.fqid; 752 } 753 754 /*checksum errors, send them to normal path and set it in annotation */ 755 err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; 756 757 err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; 758 err_cfg.set_frame_annotation = true; 759 760 ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, 761 priv->token, &err_cfg); 762 if (ret) { 763 PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:" 764 "code = %d\n", ret); 765 return ret; 766 } 767 /* VLAN Offload Settings */ 768 if (priv->max_vlan_filters) { 769 ret = dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); 770 if (ret) { 771 PMD_INIT_LOG(ERR, "Error to dpaa2_vlan_offload_set:" 772 "code = %d\n", ret); 773 return ret; 774 } 775 } 776 777 778 /* if the interrupts were configured on this devices*/ 779 if (intr_handle && (intr_handle->fd) && 780 (dev->data->dev_conf.intr_conf.lsc != 0)) { 781 /* Registering LSC interrupt handler */ 782 rte_intr_callback_register(intr_handle, 783 dpaa2_interrupt_handler, 784 (void *)dev); 785 786 /* enable vfio intr/eventfd mapping 787 * Interrupt index 0 is required, so we can not use 788 * rte_intr_enable. 789 */ 790 rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); 791 792 /* enable dpni_irqs */ 793 dpaa2_eth_setup_irqs(dev, 1); 794 } 795 796 return 0; 797 } 798 799 /** 800 * This routine disables all traffic on the adapter by issuing a 801 * global reset on the MAC. 
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL\n");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			RTE_LOG(ERR, PMD,
				"Unable to disable M promisc mode %d\n",
				ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv =
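/* Illustrative usage sketch (editor's note, not driver code): the unicast and
 * multicast promiscuous settings above interact through the generic ethdev
 * state; dropping promiscuous mode keeps multicast promiscuity when
 * all-multicast is still requested. port_id is assumed valid.
 *
 *	rte_eth_promiscuous_enable(port_id);	sets U and M promisc in the DPNI
 *	rte_eth_allmulticast_enable(port_id);
 *	rte_eth_promiscuous_disable(port_id);	U promisc cleared; M promisc kept
 *						because all_multicast is still set
 */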
dev->data->dev_private; 934 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 935 936 PMD_INIT_FUNC_TRACE(); 937 938 if (dpni == NULL) { 939 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 940 return; 941 } 942 943 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 944 if (ret < 0) 945 RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret); 946 } 947 948 static void 949 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 950 { 951 int ret; 952 struct dpaa2_dev_priv *priv = dev->data->dev_private; 953 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 954 955 PMD_INIT_FUNC_TRACE(); 956 957 if (dpni == NULL) { 958 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 959 return; 960 } 961 962 /* must remain on for all promiscuous */ 963 if (dev->data->promiscuous == 1) 964 return; 965 966 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 967 if (ret < 0) 968 RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret); 969 } 970 971 static int 972 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 973 { 974 int ret; 975 struct dpaa2_dev_priv *priv = dev->data->dev_private; 976 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 977 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN 978 + VLAN_TAG_SIZE; 979 980 PMD_INIT_FUNC_TRACE(); 981 982 if (dpni == NULL) { 983 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 984 return -EINVAL; 985 } 986 987 /* check that mtu is within the allowed range */ 988 if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) 989 return -EINVAL; 990 991 if (frame_size > ETHER_MAX_LEN) 992 dev->data->dev_conf.rxmode.jumbo_frame = 1; 993 else 994 dev->data->dev_conf.rxmode.jumbo_frame = 0; 995 996 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 997 998 /* Set the Max Rx frame length as 'mtu' + 999 * Maximum Ethernet header length 1000 */ 1001 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 1002 frame_size); 1003 if (ret) { 1004 PMD_DRV_LOG(ERR, "setting the max frame length failed"); 1005 return -1; 1006 } 1007 PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu); 1008 return 0; 1009 } 1010 1011 static int 1012 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 1013 struct ether_addr *addr, 1014 __rte_unused uint32_t index, 1015 __rte_unused uint32_t pool) 1016 { 1017 int ret; 1018 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1019 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1020 1021 PMD_INIT_FUNC_TRACE(); 1022 1023 if (dpni == NULL) { 1024 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 1025 return -1; 1026 } 1027 1028 ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, 1029 priv->token, addr->addr_bytes); 1030 if (ret) 1031 RTE_LOG(ERR, PMD, 1032 "error: Adding the MAC ADDR failed: err = %d\n", ret); 1033 return 0; 1034 } 1035 1036 static void 1037 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 1038 uint32_t index) 1039 { 1040 int ret; 1041 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1042 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1043 struct rte_eth_dev_data *data = dev->data; 1044 struct ether_addr *macaddr; 1045 1046 PMD_INIT_FUNC_TRACE(); 1047 1048 macaddr = &data->mac_addrs[index]; 1049 1050 if (dpni == NULL) { 1051 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 1052 return; 1053 } 1054 1055 ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 1056 priv->token, macaddr->addr_bytes); 1057 if (ret) 1058 RTE_LOG(ERR, PMD, 1059 "error: Removing the MAC ADDR failed: err = %d\n", ret); 1060 } 1061 1062 static void 1063 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 1064 
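/* Worked example (editor's note) for dpaa2_dev_mtu_set() above, using the
 * standard sizes ETHER_HDR_LEN (14), ETHER_CRC_LEN (4) and VLAN_TAG_SIZE (4):
 *
 *	rte_eth_dev_set_mtu(port_id, 1500);
 *		frame_size = 1500 + 14 + 4 + 4 = 1522; jumbo_frame gets set
 *		because 1522 > ETHER_MAX_LEN (1518)
 *
 *	rte_eth_dev_set_mtu(port_id, 9000);
 *		frame_size = 9022; accepted only while it does not exceed
 *		DPAA2_MAX_RX_PKT_LEN
 */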
struct ether_addr *addr) 1065 { 1066 int ret; 1067 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1068 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1069 1070 PMD_INIT_FUNC_TRACE(); 1071 1072 if (dpni == NULL) { 1073 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 1074 return; 1075 } 1076 1077 ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 1078 priv->token, addr->addr_bytes); 1079 1080 if (ret) 1081 RTE_LOG(ERR, PMD, 1082 "error: Setting the MAC ADDR failed %d\n", ret); 1083 } 1084 static 1085 int dpaa2_dev_stats_get(struct rte_eth_dev *dev, 1086 struct rte_eth_stats *stats) 1087 { 1088 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1089 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1090 int32_t retcode; 1091 uint8_t page0 = 0, page1 = 1, page2 = 2; 1092 union dpni_statistics value; 1093 1094 memset(&value, 0, sizeof(union dpni_statistics)); 1095 1096 PMD_INIT_FUNC_TRACE(); 1097 1098 if (!dpni) { 1099 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 1100 return -EINVAL; 1101 } 1102 1103 if (!stats) { 1104 RTE_LOG(ERR, PMD, "stats is NULL\n"); 1105 return -EINVAL; 1106 } 1107 1108 /*Get Counters from page_0*/ 1109 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1110 page0, 0, &value); 1111 if (retcode) 1112 goto err; 1113 1114 stats->ipackets = value.page_0.ingress_all_frames; 1115 stats->ibytes = value.page_0.ingress_all_bytes; 1116 1117 /*Get Counters from page_1*/ 1118 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1119 page1, 0, &value); 1120 if (retcode) 1121 goto err; 1122 1123 stats->opackets = value.page_1.egress_all_frames; 1124 stats->obytes = value.page_1.egress_all_bytes; 1125 1126 /*Get Counters from page_2*/ 1127 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1128 page2, 0, &value); 1129 if (retcode) 1130 goto err; 1131 1132 /* Ingress drop frame count due to configured rules */ 1133 stats->ierrors = value.page_2.ingress_filtered_frames; 1134 /* Ingress drop frame count due to error */ 1135 stats->ierrors += value.page_2.ingress_discarded_frames; 1136 1137 stats->oerrors = value.page_2.egress_discarded_frames; 1138 stats->imissed = value.page_2.ingress_nobuffer_discards; 1139 1140 return 0; 1141 1142 err: 1143 RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); 1144 return retcode; 1145 }; 1146 1147 static int 1148 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1149 unsigned int n) 1150 { 1151 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1152 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1153 int32_t retcode; 1154 union dpni_statistics value[3] = {}; 1155 unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 1156 1157 if (xstats == NULL) 1158 return 0; 1159 1160 if (n < num) 1161 return num; 1162 1163 /* Get Counters from page_0*/ 1164 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1165 0, 0, &value[0]); 1166 if (retcode) 1167 goto err; 1168 1169 /* Get Counters from page_1*/ 1170 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1171 1, 0, &value[1]); 1172 if (retcode) 1173 goto err; 1174 1175 /* Get Counters from page_2*/ 1176 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1177 2, 0, &value[2]); 1178 if (retcode) 1179 goto err; 1180 1181 for (i = 0; i < num; i++) { 1182 xstats[i].id = i; 1183 xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 
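/* Illustrative usage sketch (editor's note, not driver code): the usual
 * application pattern for the xstats callbacks implemented above is to size
 * the arrays first, then fetch names and values together.
 *
 *	int cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(cnt, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(cnt, sizeof(*vals));
 *
 *	rte_eth_xstats_get_names(port_id, names, cnt);
 *	rte_eth_xstats_get(port_id, vals, cnt);
 *	for (int i = 0; i < cnt; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
 */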
1184 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1185 } 1186 return i; 1187 err: 1188 RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode); 1189 return retcode; 1190 } 1191 1192 static int 1193 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1194 struct rte_eth_xstat_name *xstats_names, 1195 __rte_unused unsigned int limit) 1196 { 1197 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1198 1199 if (xstats_names != NULL) 1200 for (i = 0; i < stat_cnt; i++) 1201 snprintf(xstats_names[i].name, 1202 sizeof(xstats_names[i].name), 1203 "%s", 1204 dpaa2_xstats_strings[i].name); 1205 1206 return stat_cnt; 1207 } 1208 1209 static int 1210 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1211 uint64_t *values, unsigned int n) 1212 { 1213 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1214 uint64_t values_copy[stat_cnt]; 1215 1216 if (!ids) { 1217 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1218 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1219 int32_t retcode; 1220 union dpni_statistics value[3] = {}; 1221 1222 if (n < stat_cnt) 1223 return stat_cnt; 1224 1225 if (!values) 1226 return 0; 1227 1228 /* Get Counters from page_0*/ 1229 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1230 0, 0, &value[0]); 1231 if (retcode) 1232 return 0; 1233 1234 /* Get Counters from page_1*/ 1235 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1236 1, 0, &value[1]); 1237 if (retcode) 1238 return 0; 1239 1240 /* Get Counters from page_2*/ 1241 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1242 2, 0, &value[2]); 1243 if (retcode) 1244 return 0; 1245 1246 for (i = 0; i < stat_cnt; i++) { 1247 values[i] = value[dpaa2_xstats_strings[i].page_id]. 1248 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1249 } 1250 return stat_cnt; 1251 } 1252 1253 dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 1254 1255 for (i = 0; i < n; i++) { 1256 if (ids[i] >= stat_cnt) { 1257 PMD_INIT_LOG(ERR, "id value isn't valid"); 1258 return -1; 1259 } 1260 values[i] = values_copy[ids[i]]; 1261 } 1262 return n; 1263 } 1264 1265 static int 1266 dpaa2_xstats_get_names_by_id( 1267 struct rte_eth_dev *dev, 1268 struct rte_eth_xstat_name *xstats_names, 1269 const uint64_t *ids, 1270 unsigned int limit) 1271 { 1272 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1273 struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 1274 1275 if (!ids) 1276 return dpaa2_xstats_get_names(dev, xstats_names, limit); 1277 1278 dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 1279 1280 for (i = 0; i < limit; i++) { 1281 if (ids[i] >= stat_cnt) { 1282 PMD_INIT_LOG(ERR, "id value isn't valid"); 1283 return -1; 1284 } 1285 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 1286 } 1287 return limit; 1288 } 1289 1290 static void 1291 dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1292 { 1293 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1294 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1295 int32_t retcode; 1296 1297 PMD_INIT_FUNC_TRACE(); 1298 1299 if (dpni == NULL) { 1300 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 1301 return; 1302 } 1303 1304 retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1305 if (retcode) 1306 goto error; 1307 1308 return; 1309 1310 error: 1311 RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); 1312 return; 1313 }; 1314 1315 /* return 0 means link status changed, -1 means not changed */ 1316 static int 1317 dpaa2_dev_link_update(struct 
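/* Illustrative usage sketch (editor's note, not driver code): polling a single
 * counter through the *_by_id callbacks above; the xstat name must be one of
 * dpaa2_xstats_strings[].
 *
 *	uint64_t id, drops;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id,
 *					  "ingress_nobuffer_discards", &id) == 0) {
 *		rte_eth_xstats_get_by_id(port_id, &id, &drops, 1);
 *		printf("no-buffer discards: %" PRIu64 "\n", drops);
 *	}
 */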
rte_eth_dev *dev, 1318 int wait_to_complete __rte_unused) 1319 { 1320 int ret; 1321 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1322 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 1323 struct rte_eth_link link, old; 1324 struct dpni_link_state state = {0}; 1325 1326 if (dpni == NULL) { 1327 RTE_LOG(ERR, PMD, "dpni is NULL\n"); 1328 return 0; 1329 } 1330 memset(&old, 0, sizeof(old)); 1331 dpaa2_dev_atomic_read_link_status(dev, &old); 1332 1333 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1334 if (ret < 0) { 1335 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); 1336 return -1; 1337 } 1338 1339 if ((old.link_status == state.up) && (old.link_speed == state.rate)) { 1340 RTE_LOG(DEBUG, PMD, "No change in status\n"); 1341 return -1; 1342 } 1343 1344 memset(&link, 0, sizeof(struct rte_eth_link)); 1345 link.link_status = state.up; 1346 link.link_speed = state.rate; 1347 1348 if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1349 link.link_duplex = ETH_LINK_HALF_DUPLEX; 1350 else 1351 link.link_duplex = ETH_LINK_FULL_DUPLEX; 1352 1353 dpaa2_dev_atomic_write_link_status(dev, &link); 1354 1355 if (link.link_status) 1356 PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id); 1357 else 1358 PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id); 1359 return 0; 1360 } 1361 1362 /** 1363 * Toggle the DPNI to enable, if not already enabled. 1364 * This is not strictly PHY up/down - it is more of logical toggling. 1365 */ 1366 static int 1367 dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1368 { 1369 int ret = -EINVAL; 1370 struct dpaa2_dev_priv *priv; 1371 struct fsl_mc_io *dpni; 1372 int en = 0; 1373 struct dpni_link_state state = {0}; 1374 1375 priv = dev->data->dev_private; 1376 dpni = (struct fsl_mc_io *)priv->hw; 1377 1378 if (dpni == NULL) { 1379 RTE_LOG(ERR, PMD, "DPNI is NULL\n"); 1380 return ret; 1381 } 1382 1383 /* Check if DPNI is currently enabled */ 1384 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1385 if (ret) { 1386 /* Unable to obtain dpni status; Not continuing */ 1387 PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); 1388 return -EINVAL; 1389 } 1390 1391 /* Enable link if not already enabled */ 1392 if (!en) { 1393 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1394 if (ret) { 1395 PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); 1396 return -EINVAL; 1397 } 1398 } 1399 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1400 if (ret < 0) { 1401 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); 1402 return -1; 1403 } 1404 1405 /* changing tx burst function to start enqueues */ 1406 dev->tx_pkt_burst = dpaa2_dev_tx; 1407 dev->data->dev_link.link_status = state.up; 1408 1409 if (state.up) 1410 PMD_DRV_LOG(INFO, "Port %d Link is set as UP", 1411 dev->data->port_id); 1412 else 1413 PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id); 1414 return ret; 1415 } 1416 1417 /** 1418 * Toggle the DPNI to disable, if not already disabled. 1419 * This is not strictly PHY up/down - it is more of logical toggling. 
1420 */ 1421 static int 1422 dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 1423 { 1424 int ret = -EINVAL; 1425 struct dpaa2_dev_priv *priv; 1426 struct fsl_mc_io *dpni; 1427 int dpni_enabled = 0; 1428 int retries = 10; 1429 1430 PMD_INIT_FUNC_TRACE(); 1431 1432 priv = dev->data->dev_private; 1433 dpni = (struct fsl_mc_io *)priv->hw; 1434 1435 if (dpni == NULL) { 1436 RTE_LOG(ERR, PMD, "Device has not yet been configured\n"); 1437 return ret; 1438 } 1439 1440 /*changing tx burst function to avoid any more enqueues */ 1441 dev->tx_pkt_burst = dummy_dev_tx; 1442 1443 /* Loop while dpni_disable() attempts to drain the egress FQs 1444 * and confirm them back to us. 1445 */ 1446 do { 1447 ret = dpni_disable(dpni, 0, priv->token); 1448 if (ret) { 1449 PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret); 1450 return ret; 1451 } 1452 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 1453 if (ret) { 1454 PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret); 1455 return ret; 1456 } 1457 if (dpni_enabled) 1458 /* Allow the MC some slack */ 1459 rte_delay_us(100 * 1000); 1460 } while (dpni_enabled && --retries); 1461 1462 if (!retries) { 1463 PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n"); 1464 /* todo- we may have to manually cleanup queues. 1465 */ 1466 } else { 1467 PMD_DRV_LOG(INFO, "Port %d Link DOWN successful", 1468 dev->data->port_id); 1469 } 1470 1471 dev->data->dev_link.link_status = 0; 1472 1473 return ret; 1474 } 1475 1476 static int 1477 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1478 { 1479 int ret = -EINVAL; 1480 struct dpaa2_dev_priv *priv; 1481 struct fsl_mc_io *dpni; 1482 struct dpni_link_state state = {0}; 1483 1484 PMD_INIT_FUNC_TRACE(); 1485 1486 priv = dev->data->dev_private; 1487 dpni = (struct fsl_mc_io *)priv->hw; 1488 1489 if (dpni == NULL || fc_conf == NULL) { 1490 RTE_LOG(ERR, PMD, "device not configured\n"); 1491 return ret; 1492 } 1493 1494 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1495 if (ret) { 1496 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); 1497 return ret; 1498 } 1499 1500 memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 1501 if (state.options & DPNI_LINK_OPT_PAUSE) { 1502 /* DPNI_LINK_OPT_PAUSE set 1503 * if ASYM_PAUSE not set, 1504 * RX Side flow control (handle received Pause frame) 1505 * TX side flow control (send Pause frame) 1506 * if ASYM_PAUSE set, 1507 * RX Side flow control (handle received Pause frame) 1508 * No TX side flow control (send Pause frame disabled) 1509 */ 1510 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 1511 fc_conf->mode = RTE_FC_FULL; 1512 else 1513 fc_conf->mode = RTE_FC_RX_PAUSE; 1514 } else { 1515 /* DPNI_LINK_OPT_PAUSE not set 1516 * if ASYM_PAUSE set, 1517 * TX side flow control (send Pause frame) 1518 * No RX side flow control (No action on pause frame rx) 1519 * if ASYM_PAUSE not set, 1520 * Flow control disabled 1521 */ 1522 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 1523 fc_conf->mode = RTE_FC_TX_PAUSE; 1524 else 1525 fc_conf->mode = RTE_FC_NONE; 1526 } 1527 1528 return ret; 1529 } 1530 1531 static int 1532 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1533 { 1534 int ret = -EINVAL; 1535 struct dpaa2_dev_priv *priv; 1536 struct fsl_mc_io *dpni; 1537 struct dpni_link_state state = {0}; 1538 struct dpni_link_cfg cfg = {0}; 1539 1540 PMD_INIT_FUNC_TRACE(); 1541 1542 priv = dev->data->dev_private; 1543 dpni = (struct fsl_mc_io *)priv->hw; 1544 1545 if (dpni == NULL) { 1546 RTE_LOG(ERR, PMD, 
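/* Summary (editor's note) of how dpaa2_flow_ctrl_get() above decodes the DPNI
 * link options into the reported mode:
 *
 *	OPT_PAUSE  OPT_ASYM_PAUSE   fc_conf->mode
 *	set        not set          RTE_FC_FULL      (pause on rx and tx)
 *	set        set              RTE_FC_RX_PAUSE  (handle received pause only)
 *	not set    set              RTE_FC_TX_PAUSE  (send pause frames only)
 *	not set    not set          RTE_FC_NONE
 *
 * Typical application call, with port_id assumed valid:
 *
 *	struct rte_eth_fc_conf fc;
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc);
 */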
"dpni is NULL\n"); 1547 return ret; 1548 } 1549 1550 /* It is necessary to obtain the current state before setting fc_conf 1551 * as MC would return error in case rate, autoneg or duplex values are 1552 * different. 1553 */ 1554 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1555 if (ret) { 1556 RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret); 1557 return -1; 1558 } 1559 1560 /* Disable link before setting configuration */ 1561 dpaa2_dev_set_link_down(dev); 1562 1563 /* Based on fc_conf, update cfg */ 1564 cfg.rate = state.rate; 1565 cfg.options = state.options; 1566 1567 /* update cfg with fc_conf */ 1568 switch (fc_conf->mode) { 1569 case RTE_FC_FULL: 1570 /* Full flow control; 1571 * OPT_PAUSE set, ASYM_PAUSE not set 1572 */ 1573 cfg.options |= DPNI_LINK_OPT_PAUSE; 1574 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1575 break; 1576 case RTE_FC_TX_PAUSE: 1577 /* Enable RX flow control 1578 * OPT_PAUSE not set; 1579 * ASYM_PAUSE set; 1580 */ 1581 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1582 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1583 break; 1584 case RTE_FC_RX_PAUSE: 1585 /* Enable TX Flow control 1586 * OPT_PAUSE set 1587 * ASYM_PAUSE set 1588 */ 1589 cfg.options |= DPNI_LINK_OPT_PAUSE; 1590 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1591 break; 1592 case RTE_FC_NONE: 1593 /* Disable Flow control 1594 * OPT_PAUSE not set 1595 * ASYM_PAUSE not set 1596 */ 1597 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1598 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1599 break; 1600 default: 1601 RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n", 1602 fc_conf->mode); 1603 return -1; 1604 } 1605 1606 ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 1607 if (ret) 1608 RTE_LOG(ERR, PMD, 1609 "Unable to set Link configuration (err=%d)\n", 1610 ret); 1611 1612 /* Enable link */ 1613 dpaa2_dev_set_link_up(dev); 1614 1615 return ret; 1616 } 1617 1618 static int 1619 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, 1620 struct rte_eth_rss_conf *rss_conf) 1621 { 1622 struct rte_eth_dev_data *data = dev->data; 1623 struct rte_eth_conf *eth_conf = &data->dev_conf; 1624 int ret; 1625 1626 PMD_INIT_FUNC_TRACE(); 1627 1628 if (rss_conf->rss_hf) { 1629 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); 1630 if (ret) { 1631 PMD_INIT_LOG(ERR, "unable to set flow dist"); 1632 return ret; 1633 } 1634 } else { 1635 ret = dpaa2_remove_flow_dist(dev, 0); 1636 if (ret) { 1637 PMD_INIT_LOG(ERR, "unable to remove flow dist"); 1638 return ret; 1639 } 1640 } 1641 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 1642 return 0; 1643 } 1644 1645 static int 1646 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1647 struct rte_eth_rss_conf *rss_conf) 1648 { 1649 struct rte_eth_dev_data *data = dev->data; 1650 struct rte_eth_conf *eth_conf = &data->dev_conf; 1651 1652 /* dpaa2 does not support rss_key, so length should be 0*/ 1653 rss_conf->rss_key_len = 0; 1654 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 1655 return 0; 1656 } 1657 1658 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, 1659 int eth_rx_queue_id, 1660 uint16_t dpcon_id, 1661 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 1662 { 1663 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 1664 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; 1665 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 1666 uint8_t flow_id = dpaa2_ethq->flow_id; 1667 struct dpni_queue cfg; 1668 uint8_t options; 1669 int ret; 1670 1671 if (queue_conf->ev.sched_type == 
RTE_SCHED_TYPE_PARALLEL) 1672 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; 1673 else 1674 return -EINVAL; 1675 1676 memset(&cfg, 0, sizeof(struct dpni_queue)); 1677 options = DPNI_QUEUE_OPT_DEST; 1678 cfg.destination.type = DPNI_DEST_DPCON; 1679 cfg.destination.id = dpcon_id; 1680 cfg.destination.priority = queue_conf->ev.priority; 1681 1682 options |= DPNI_QUEUE_OPT_USER_CTX; 1683 cfg.user_context = (uint64_t)(dpaa2_ethq); 1684 1685 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1686 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1687 if (ret) { 1688 RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret); 1689 return ret; 1690 } 1691 1692 memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 1693 1694 return 0; 1695 } 1696 1697 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 1698 int eth_rx_queue_id) 1699 { 1700 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 1701 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; 1702 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 1703 uint8_t flow_id = dpaa2_ethq->flow_id; 1704 struct dpni_queue cfg; 1705 uint8_t options; 1706 int ret; 1707 1708 memset(&cfg, 0, sizeof(struct dpni_queue)); 1709 options = DPNI_QUEUE_OPT_DEST; 1710 cfg.destination.type = DPNI_DEST_NONE; 1711 1712 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 1713 dpaa2_ethq->tc_index, flow_id, options, &cfg); 1714 if (ret) 1715 RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret); 1716 1717 return ret; 1718 } 1719 1720 static struct eth_dev_ops dpaa2_ethdev_ops = { 1721 .dev_configure = dpaa2_eth_dev_configure, 1722 .dev_start = dpaa2_dev_start, 1723 .dev_stop = dpaa2_dev_stop, 1724 .dev_close = dpaa2_dev_close, 1725 .promiscuous_enable = dpaa2_dev_promiscuous_enable, 1726 .promiscuous_disable = dpaa2_dev_promiscuous_disable, 1727 .allmulticast_enable = dpaa2_dev_allmulticast_enable, 1728 .allmulticast_disable = dpaa2_dev_allmulticast_disable, 1729 .dev_set_link_up = dpaa2_dev_set_link_up, 1730 .dev_set_link_down = dpaa2_dev_set_link_down, 1731 .link_update = dpaa2_dev_link_update, 1732 .stats_get = dpaa2_dev_stats_get, 1733 .xstats_get = dpaa2_dev_xstats_get, 1734 .xstats_get_by_id = dpaa2_xstats_get_by_id, 1735 .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 1736 .xstats_get_names = dpaa2_xstats_get_names, 1737 .stats_reset = dpaa2_dev_stats_reset, 1738 .xstats_reset = dpaa2_dev_stats_reset, 1739 .fw_version_get = dpaa2_fw_version_get, 1740 .dev_infos_get = dpaa2_dev_info_get, 1741 .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 1742 .mtu_set = dpaa2_dev_mtu_set, 1743 .vlan_filter_set = dpaa2_vlan_filter_set, 1744 .vlan_offload_set = dpaa2_vlan_offload_set, 1745 .rx_queue_setup = dpaa2_dev_rx_queue_setup, 1746 .rx_queue_release = dpaa2_dev_rx_queue_release, 1747 .tx_queue_setup = dpaa2_dev_tx_queue_setup, 1748 .tx_queue_release = dpaa2_dev_tx_queue_release, 1749 .rx_queue_count = dpaa2_dev_rx_queue_count, 1750 .flow_ctrl_get = dpaa2_flow_ctrl_get, 1751 .flow_ctrl_set = dpaa2_flow_ctrl_set, 1752 .mac_addr_add = dpaa2_dev_add_mac_addr, 1753 .mac_addr_remove = dpaa2_dev_remove_mac_addr, 1754 .mac_addr_set = dpaa2_dev_set_mac_addr, 1755 .rss_hash_update = dpaa2_dev_rss_hash_update, 1756 .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 1757 }; 1758 1759 static int 1760 dpaa2_dev_init(struct rte_eth_dev *eth_dev) 1761 { 1762 struct rte_device *dev = eth_dev->device; 1763 struct rte_dpaa2_device *dpaa2_dev; 1764 struct fsl_mc_io *dpni_dev; 1765 
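/* Illustrative usage sketch (editor's note, not driver code): the event queue
 * attach helper above is meant to be driven by the DPAA2 event device / Rx
 * adapter code. dpcon_id, ev_qid and rx_queue_id are placeholders here; only
 * RTE_SCHED_TYPE_PARALLEL is accepted.
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev = {
 *			.sched_type = RTE_SCHED_TYPE_PARALLEL,
 *			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *			.queue_id = ev_qid,
 *		},
 *	};
 *
 *	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id, dpcon_id, &qconf);
 *	...
 *	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
 */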
struct dpni_attr attr; 1766 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 1767 struct dpni_buffer_layout layout; 1768 int ret, hw_id; 1769 1770 PMD_INIT_FUNC_TRACE(); 1771 1772 /* For secondary processes, the primary has done all the work */ 1773 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1774 return 0; 1775 1776 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 1777 1778 hw_id = dpaa2_dev->object_id; 1779 1780 dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 1781 if (!dpni_dev) { 1782 PMD_INIT_LOG(ERR, "malloc failed for dpni device\n"); 1783 return -1; 1784 } 1785 1786 dpni_dev->regs = rte_mcp_ptr_list[0]; 1787 ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 1788 if (ret) { 1789 PMD_INIT_LOG(ERR, 1790 "Failure in opening dpni@%d with err code %d\n", 1791 hw_id, ret); 1792 rte_free(dpni_dev); 1793 return -1; 1794 } 1795 1796 /* Clean the device first */ 1797 ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 1798 if (ret) { 1799 PMD_INIT_LOG(ERR, 1800 "Failure cleaning dpni@%d with err code %d\n", 1801 hw_id, ret); 1802 goto init_err; 1803 } 1804 1805 ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 1806 if (ret) { 1807 PMD_INIT_LOG(ERR, 1808 "Failure in get dpni@%d attribute, err code %d\n", 1809 hw_id, ret); 1810 goto init_err; 1811 } 1812 1813 priv->num_rx_tc = attr.num_rx_tcs; 1814 1815 /* Resetting the "num_rx_queues" to equal number of queues in first TC 1816 * as only one TC is supported on Rx Side. Once Multiple TCs will be 1817 * in use for Rx processing then this will be changed or removed. 1818 */ 1819 priv->nb_rx_queues = attr.num_queues; 1820 1821 /* Using number of TX queues as number of TX TCs */ 1822 priv->nb_tx_queues = attr.num_tx_tcs; 1823 1824 PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", 1825 priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues); 1826 1827 priv->hw = dpni_dev; 1828 priv->hw_id = hw_id; 1829 priv->options = attr.options; 1830 priv->max_mac_filters = attr.mac_filter_entries; 1831 priv->max_vlan_filters = attr.vlan_filter_entries; 1832 priv->flags = 0; 1833 1834 /* Allocate memory for hardware structure for queues */ 1835 ret = dpaa2_alloc_rx_tx_queues(eth_dev); 1836 if (ret) { 1837 PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n"); 1838 goto init_err; 1839 } 1840 1841 /* Allocate memory for storing MAC addresses */ 1842 eth_dev->data->mac_addrs = rte_zmalloc("dpni", 1843 ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 1844 if (eth_dev->data->mac_addrs == NULL) { 1845 PMD_INIT_LOG(ERR, 1846 "Failed to allocate %d bytes needed to store MAC addresses", 1847 ETHER_ADDR_LEN * attr.mac_filter_entries); 1848 ret = -ENOMEM; 1849 goto init_err; 1850 } 1851 1852 ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 1853 priv->token, 1854 (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); 1855 if (ret) { 1856 PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n", 1857 ret); 1858 goto init_err; 1859 } 1860 1861 /* ... tx buffer layout ... */ 1862 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 1863 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 1864 layout.pass_frame_status = 1; 1865 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 1866 DPNI_QUEUE_TX, &layout); 1867 if (ret) { 1868 PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout", 1869 ret); 1870 goto init_err; 1871 } 1872 1873 /* ... tx-conf and error buffer layout ... 
 */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free all queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	RTE_LOG(INFO, PMD, "%s: netdev deleted\n", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;
	eth_dev->device->driver = &dpaa2_drv->driver;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if
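/* Illustrative usage sketch (editor's note, not driver code): because the
 * driver advertises RTE_ETH_DEV_INTR_LSC above, an application can receive
 * link-state-change events instead of polling; lsc_cb and port_id are
 * hypothetical names.
 *
 *	static int lsc_cb(uint16_t port_id, enum rte_eth_event_type ev,
 *			  void *cb_arg, void *ret_param)
 *	{
 *		struct rte_eth_link link;
 *
 *		rte_eth_link_get_nowait(port_id, &link);
 *		printf("port %u link %s\n", port_id,
 *		       link.link_status ? "up" : "down");
 *		return 0;
 *	}
 *
 *	port_conf.intr_conf.lsc = 1;	before rte_eth_dev_configure()
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_cb, NULL);
 */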
(rte_eal_process_type() == RTE_PROC_PRIMARY) 1998 rte_free(eth_dev->data->dev_private); 1999 rte_eth_dev_release_port(eth_dev); 2000 return diag; 2001 } 2002 2003 static int 2004 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 2005 { 2006 struct rte_eth_dev *eth_dev; 2007 2008 eth_dev = dpaa2_dev->eth_dev; 2009 dpaa2_dev_uninit(eth_dev); 2010 2011 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2012 rte_free(eth_dev->data->dev_private); 2013 rte_eth_dev_release_port(eth_dev); 2014 2015 return 0; 2016 } 2017 2018 static struct rte_dpaa2_driver rte_dpaa2_pmd = { 2019 .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 2020 .drv_type = DPAA2_ETH, 2021 .probe = rte_dpaa2_probe, 2022 .remove = rte_dpaa2_remove, 2023 }; 2024 2025 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); 2026
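/* Illustrative usage sketch (editor's note, not driver code): minimal
 * single-queue bring-up of a dpaa2 port with the ops registered above.
 * port_id and mb_pool are assumed to exist; the descriptor counts are ignored
 * by this PMD (nb_rx_desc/nb_tx_desc are __rte_unused in the queue setup ops).
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .hw_ip_checksum = 1 },
 *	};
 *	struct rte_eth_link link;
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			       rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			       rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 *	rte_eth_link_get_nowait(port_id, &link);
 */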