/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));

	return 0;
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = RTE_ETH_LINK_UP;
	else
		link.link_status = RTE_ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

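/*
 * One-time hardware bring-up for the port and station interface (SI):
 * derive the port/global register bases, apply the Rx FIFO lock-up erratum
 * workaround, set coherent DMA transaction attributes, enable the SI, and
 * read the primary MAC from PSIPMAR0/1, falling back to a random
 * locally-administered address when none is programmed.
 */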
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* WA for Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				 "set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		 RTE_ETH_RX_OFFLOAD_KEEP_CRC);

	return 0;
}

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		err = -ENOMEM;
		return -1;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

static void
enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
			      RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
		ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -1;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

static void
enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good; to count only good received
	 * packets use the ENETC_PM0_RFRM, ENETC_PM0_TFRM registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

static int
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return ret;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable promiscuous mode */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Setting to disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable allmulticast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	/* Setting to disable all multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
		 ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	/* setting the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;
	uint32_t max_len;

	PMD_INIT_FUNC_TRACE();

	max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
		  RTE_ETHER_CRC_LEN;
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
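		/* write the updated mode register back to enable the ring */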
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure = enetc_dev_configure,
	.dev_start = enetc_dev_start,
	.dev_stop = enetc_dev_stop,
	.dev_close = enetc_dev_close,
	.link_update = enetc_link_update,
	.stats_get = enetc_stats_get,
	.stats_reset = enetc_stats_reset,
	.promiscuous_enable = enetc_promiscuous_enable,
	.promiscuous_disable = enetc_promiscuous_disable,
	.allmulticast_enable = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get = enetc_dev_infos_get,
	.mtu_set = enetc_mtu_set,
	.rx_queue_setup = enetc_rx_queue_setup,
	.rx_queue_start = enetc_rx_queue_start,
	.rx_queue_stop = enetc_rx_queue_stop,
	.rx_queue_release = enetc_rx_queue_release,
	.tx_queue_setup = enetc_tx_queue_setup,
	.tx_queue_start = enetc_tx_queue_start,
	.tx_queue_stop = enetc_tx_queue_stop,
	.tx_queue_release = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		error = -ENOMEM;
		return -1;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			     RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER_DEFAULT(enetc_logtype_pmd, NOTICE);