/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}
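/*
 * One-time hardware init for the port: compute the port and global register
 * base addresses, apply the Rx lock-up erratum workaround, set coherent
 * transaction attributes, enable the station interface and read the primary
 * MAC address, falling back to a random locally-administered address when
 * the SI has none.
 */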
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* WA for Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				 "set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}
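/*
 * Program a TX BD ring into hardware: write the IOVA of the ring base and
 * the ring length, zero the TBCIR/TBCISR index registers, and cache their
 * addresses for fast-path use.
 */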
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

static void
enetc_tx_queue_release(void *txq)
{
	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}
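/*
 * Program an RX BD ring into hardware: write the IOVA of the ring base and
 * the ring length, attach the mbuf pool, pre-fill the ring with buffers and
 * derive the buffer size from the pool's data room.
 */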
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
			      RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -1;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}
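/* Fill ethdev basic stats from the port MAC (PM0) counter registers. */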
static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good; to count only good received
	 * packets, use the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers
	 * instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

static int
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return 0;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable promiscuous mode */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Setting to disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable allmulticast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	/* Setting to disable all multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}
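/*
 * Update the port MTU: validate the resulting frame size against the MAC
 * limits, refuse sizes that would require scattered RX when it is not
 * enabled, update the jumbo-frame offload flag and program the new maximum
 * frame size into ENETC_PM0_MAXFRM.
 */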
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* check that mtu is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* setting the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
				 RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}
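/*
 * The per-queue start/stop callbacks below toggle the enable bit in the
 * RX/TX BD ring mode register (RBMR/TBMR) and track the queue state in
 * the ethdev data.
 */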
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure = enetc_dev_configure,
	.dev_start = enetc_dev_start,
	.dev_stop = enetc_dev_stop,
	.dev_close = enetc_dev_close,
	.link_update = enetc_link_update,
	.stats_get = enetc_stats_get,
	.stats_reset = enetc_stats_reset,
	.promiscuous_enable = enetc_promiscuous_enable,
	.promiscuous_disable = enetc_promiscuous_disable,
	.allmulticast_enable = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get = enetc_dev_infos_get,
	.mtu_set = enetc_mtu_set,
	.rx_queue_setup = enetc_rx_queue_setup,
	.rx_queue_start = enetc_rx_queue_start,
	.rx_queue_stop = enetc_rx_queue_stop,
	.rx_queue_release = enetc_rx_queue_release,
	.tx_queue_setup = enetc_tx_queue_setup,
	.tx_queue_start = enetc_tx_queue_start,
	.tx_queue_stop = enetc_tx_queue_stop,
	.tx_queue_release = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		error = -ENOMEM;
		return -1;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			     RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER(enetc_logtype_pmd, pmd.net.enetc, NOTICE);