/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

        /* set auto-speed for RGMII */
        if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
        }
        if (enetc_global_rd(enetc_hw,
                            ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
        }

        return 0;
}

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Disable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
        char buf[RTE_ETHER_ADDR_FMT_SIZE];

        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
        ENETC_PMD_NOTICE("%s%s\n", name, buf);
}
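
/*
 * One-time hardware init: locate the port and global register blocks,
 * enable the station interface, and read the primary MAC address,
 * programming a random locally administered address if none is set.
 */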
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t *mac = (uint32_t *)hw->mac.addr;
        uint32_t high_mac = 0;
        uint16_t low_mac = 0;

        PMD_INIT_FUNC_TRACE();
        /* Calculating and storing the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* Enabling Station Interface */
        enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

        *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
        high_mac = (uint32_t)*mac;
        mac++;
        *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
        low_mac = (uint16_t)*mac;

        if ((high_mac | low_mac) == 0) {
                char *first_byte;

                ENETC_PMD_NOTICE("MAC is not available for this SI, "
                                 "set random MAC\n");
                mac = (uint32_t *)hw->mac.addr;
                *mac = (uint32_t)rte_rand();
                first_byte = (char *)mac;
                *first_byte &= 0xfe;    /* clear multicast bit */
                *first_byte |= 0x02;    /* set local assignment bit (IEEE802) */

                enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
                mac++;
                *mac = (uint16_t)rte_rand();
                enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
                print_ethaddr("New address: ",
                              (const struct rte_ether_addr *)hw->mac.addr);
        }

        return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
        dev_info->rx_offload_capa =
                (DEV_RX_OFFLOAD_IPV4_CKSUM |
                 DEV_RX_OFFLOAD_UDP_CKSUM |
                 DEV_RX_OFFLOAD_TCP_CKSUM |
                 DEV_RX_OFFLOAD_KEEP_CRC |
                 DEV_RX_OFFLOAD_JUMBO_FRAME);

        return 0;
}

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)tx_ring->bd_base);
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
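        /*
         * Keep pointers to the consumer index registers so later code can
         * access them without recomputing the BDR register offsets.
         */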
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf)
{
        int err = 0;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                ENETC_DEV_PRIVATE(data->dev_private);

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -1;

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                err = -ENOMEM;
                return -1;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        if (!tx_conf->tx_deferred_start) {
                /* enable ring */
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
                               ENETC_TBMR, ENETC_TBMR_EN);
                dev->data->tx_queue_state[tx_ring->index] =
                        RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->tx_queue_state[tx_ring->index] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}

static void
enetc_tx_queue_release(void *txq)
{
        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uint16_t buf_size;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)rx_ring->bd_base);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));
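
        /*
         * Attach the mbuf pool, pre-fill the ring with receive buffers and
         * program the per-buffer size the hardware may use.
         */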
        rx_ring->mb_pool = mb_pool;
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                              RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf,
                     struct rte_mempool *mb_pool)
{
        int err = 0;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                ENETC_DEV_PRIVATE(data->dev_private);
        uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -1;

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        if (!rx_conf->rx_deferred_start) {
                /* enable ring */
                enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
                               ENETC_RBMR_EN);
                dev->data->rx_queue_state[rx_ring->index] =
                        RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->rx_queue_state[rx_ring->index] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
        }

        rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
                                     RTE_ETHER_CRC_LEN : 0);

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        /* Total packets received/transmitted, bad + good; to count only good
         * frames, use the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers instead.
         */
        stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
        stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
        stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
        stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
        /* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
         * truncated packets
         */
        stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
        stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
        stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

        return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

        return 0;
}

static void
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
        enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Setting to enable promiscuous mode */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        /* Setting to disable promiscuous mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
        psipmr &= (~ENETC_PSIPMR_SET_UP(0));

        if (dev->data->all_multicast == 0)
                psipmr &= (~ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Setting to enable allmulticast mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        if (dev->data->promiscuous == 1)
                return 0; /* must remain in all_multicast mode */

        /* Setting to disable all multicast mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
                 ~(ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}
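
/*
 * Change the port MTU. The resulting frame size must stay within the ENETC
 * minimum/maximum frame limits and, while scattered Rx is not enabled, must
 * also fit in a single Rx buffer.
 */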
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* check that mtu is within the allowed range */
        if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
                return -EINVAL;

        /*
         * Refuse mtu that requires the support of scattered packets
         * when this feature has not been enabled before.
         */
        if (dev->data->min_rx_buf_size &&
            !dev->data->scattered_rx && frame_size >
            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
                return -EINVAL;
        }

        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
        enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        /* setting the MTU */
        enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(frame_size) |
                      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

        return 0;
}

static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint32_t checksum = L3_CKSUM | L4_CKSUM;

        PMD_INIT_FUNC_TRACE();

        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                uint32_t max_len;

                max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

                enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
                              ENETC_SET_MAXFRM(max_len));
                enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
                              ENETC_MAC_MAXFRM_SIZE);
                enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
                              2 * ENETC_MAC_MAXFRM_SIZE);
                dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
                                 RTE_ETHER_CRC_LEN;
        }

        if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
                int config;

                config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
                config |= ENETC_PM0_CRC;
                enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
        }

        if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
                checksum &= ~L3_CKSUM;

        if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
                checksum &= ~L4_CKSUM;

        enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

        return 0;
}

static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data | ENETC_RBMR_EN;
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
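        /* Nothing to do if the ring is already stopped */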
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data & (~ENETC_RBMR_EN);
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data | ENETC_TBMR_EN;
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data & (~ENETC_TBMR_EN);
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure = enetc_dev_configure,
        .dev_start = enetc_dev_start,
        .dev_stop = enetc_dev_stop,
        .dev_close = enetc_dev_close,
        .link_update = enetc_link_update,
        .stats_get = enetc_stats_get,
        .stats_reset = enetc_stats_reset,
        .promiscuous_enable = enetc_promiscuous_enable,
        .promiscuous_disable = enetc_promiscuous_disable,
        .allmulticast_enable = enetc_allmulticast_enable,
        .allmulticast_disable = enetc_allmulticast_disable,
        .dev_infos_get = enetc_dev_infos_get,
        .mtu_set = enetc_mtu_set,
        .rx_queue_setup = enetc_rx_queue_setup,
        .rx_queue_start = enetc_rx_queue_start,
        .rx_queue_stop = enetc_rx_queue_stop,
        .rx_queue_release = enetc_rx_queue_release,
        .tx_queue_setup = enetc_tx_queue_setup,
        .tx_queue_start = enetc_tx_queue_start,
        .tx_queue_stop = enetc_tx_queue_stop,
        .tx_queue_release = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return -1;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
                                               RTE_ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              RTE_ETHER_ADDR_LEN * 1);
                error = -ENOMEM;
                return -1;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                            &eth_dev->data->mac_addrs[0]);

        /* Set MTU */
        enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
        eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
                             RTE_ETHER_CRC_LEN;

        if (rte_eal_iova_mode() == RTE_IOVA_PA)
                dpaax_iova_table_populate();

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_iova_mode() == RTE_IOVA_PA)
                dpaax_iova_table_depopulate();

        return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
        enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
        if (enetc_logtype_pmd >= 0)
                rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}