/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>

#include <stdint.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "ngbe_logs.h"
#include "base/ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

/*********************************************************************
 *
 *  Queue management functions
 *
 **********************************************************************/

static void
ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
{
	unsigned int i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
{
	if (txq != NULL)
		rte_free(txq->sw_ring);
}

static void
ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
{
	if (txq != NULL) {
		if (txq->ops != NULL) {
			txq->ops->release_mbufs(txq);
			txq->ops->free_swring(txq);
		}
		rte_free(txq);
	}
}

void
ngbe_dev_tx_queue_release(void *txq)
{
	ngbe_tx_queue_release(txq);
}

/* (Re)set dynamic ngbe_tx_queue fields to defaults */
static void
ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
{
	static const struct ngbe_tx_desc zeroed_desc = {0};
	struct ngbe_tx_entry *txe = txq->sw_ring;
	uint16_t prev, i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		/* the ring can also be modified by hardware */
		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];

		txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
	txq->tx_tail = 0;

	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
}

static const struct ngbe_txq_ops def_txq_ops = {
	.release_mbufs = ngbe_tx_queue_release_mbufs,
	.free_swring = ngbe_tx_free_swring,
	.reset = ngbe_reset_tx_queue,
};

int
ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct ngbe_tx_queue *txq;
	struct ngbe_hw *hw;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/*
	 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free Tx
	 * descriptors.
	 * One descriptor in the Tx ring is used as a sentinel to avoid a
	 * H/W race condition, hence the maximum threshold constraints.
	 * When set to zero use default values.
	 */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}

	if (nb_desc % tx_free_thresh != 0) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* First allocate the Tx queue data structure */
	txq = rte_zmalloc_socket("ethdev Tx queue",
				 sizeof(struct ngbe_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		return -ENOMEM;

	/*
	 * Allocate Tx ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
			sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
			NGBE_ALIGN, socket_id);
	if (tz == NULL) {
		ngbe_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->tx_free_thresh = tx_free_thresh;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	txq->queue_id = queue_idx;
	txq->reg_idx = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->ops = &def_txq_ops;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
	txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));

	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
	txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);

	/* Allocate software ring */
	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
				sizeof(struct ngbe_tx_entry) * nb_desc,
				RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		ngbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	PMD_INIT_LOG(DEBUG,
		     "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	txq->ops->reset(txq);

	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}

/**
 * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
 *
 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
 * in the sw_sc_ring is not set to NULL but rather points to the next
 * mbuf of this RSC aggregation (that has not been completed yet and still
 * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
 * will just free first "nb_segs" segments of the cluster explicitly by calling
 * an rte_pktmbuf_free_seg().
 *
 * @m scattered cluster head
 */
static void
ngbe_free_sc_cluster(struct rte_mbuf *m)
{
	uint16_t i, nb_segs = m->nb_segs;
	struct rte_mbuf *next_seg;

	for (i = 0; i < nb_segs; i++) {
		next_seg = m->next;
		rte_pktmbuf_free_seg(m);
		m = next_seg;
	}
}

static void
ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
		for (i = 0; i < rxq->rx_nb_avail; ++i) {
			struct rte_mbuf *mb;

			mb = rxq->rx_stage[rxq->rx_next_avail + i];
			rte_pktmbuf_free_seg(mb);
		}
		rxq->rx_nb_avail = 0;
	}

	if (rxq->sw_sc_ring != NULL)
		for (i = 0; i < rxq->nb_rx_desc; i++)
			if (rxq->sw_sc_ring[i].fbuf != NULL) {
				ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
				rxq->sw_sc_ring[i].fbuf = NULL;
			}
}

static void
ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
{
	if (rxq != NULL) {
		ngbe_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq->sw_sc_ring);
		rte_free(rxq);
	}
}

void
ngbe_dev_rx_queue_release(void *rxq)
{
	ngbe_rx_queue_release(rxq);
}

/*
 * Check if Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int
check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
{
	int ret = 0;

	/*
	 * Make sure the following pre-conditions are satisfied:
	 *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
	 * Scattered packets are not supported. This should be checked
	 * outside of this function.
	 */
	if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
		ret = -EINVAL;
	} else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
			     rxq->rx_free_thresh, rxq->nb_rx_desc);
		ret = -EINVAL;
	} else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = -EINVAL;
	}

	return ret;
}

/* Reset dynamic ngbe_rx_queue fields back to defaults */
static void
ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
{
	static const struct ngbe_rx_desc zeroed_desc = {
						{{0}, {0} }, {{0}, {0} } };
	unsigned int i;
	uint16_t len = rxq->nb_rx_desc;

	/*
	 * By default, the Rx queue setup function allocates enough memory for
	 * NGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
	 * extra memory at the end of the descriptor ring to be zero'd out.
	 */
	if (adapter->rx_bulk_alloc_allowed)
		/* zero out extra memory */
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	/*
	 * Zero out HW ring memory. Zero out extra memory at the end of
	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
	 * reads extra memory as zeros.
	 */
	for (i = 0; i < len; i++)
		rxq->rx_ring[i] = zeroed_desc;

	/*
	 * initialize extra software ring entries. Space for these extra
	 * entries is always allocated
	 */
	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = rxq->nb_rx_desc; i < len; ++i)
		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}

int
ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct ngbe_rx_queue *rxq;
	struct ngbe_hw *hw;
	uint16_t len;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct ngbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->reg_idx = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	/*
	 * Allocate Rx ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      RX_RING_SZ, NGBE_ALIGN, socket_id);
	if (rz == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Zero init all the descriptors in the ring.
	 */
	memset(rz->addr, 0, RX_RING_SZ);

	rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
	rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));

	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
	rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);

	/*
	 * Certain constraints must be met in order to use the bulk buffer
	 * allocation Rx burst function. If any of Rx queues doesn't meet them
	 * the feature should be disabled for the whole port.
	 */
	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
		PMD_INIT_LOG(DEBUG,
			     "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
			     rxq->queue_id, rxq->port_id);
		adapter->rx_bulk_alloc_allowed = false;
	}

	/*
	 * Allocate software ring. Allow for space at the end of the
	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
	 * function does not access an invalid memory region.
	 */
	len = nb_desc;
	if (adapter->rx_bulk_alloc_allowed)
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
					  sizeof(struct ngbe_rx_entry) * len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Always allocate even if it's not going to be needed in order to
	 * simplify the code.
	 *
	 * This ring is used in Scattered Rx cases and Scattered Rx may
	 * be requested in ngbe_dev_rx_init(), which is called later from
	 * dev_start() flow.
	 */
	rxq->sw_sc_ring =
		rte_zmalloc_socket("rxq->sw_sc_ring",
				sizeof(struct ngbe_scattered_rx_entry) * len,
				RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_sc_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_INIT_LOG(DEBUG,
		     "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
		     rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;

	ngbe_reset_rx_queue(adapter, rxq);

	return 0;
}

void
ngbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct ngbe_tx_queue *txq = dev->data->tx_queues[i];

		if (txq != NULL) {
			txq->ops->release_mbufs(txq);
			txq->ops->reset(txq);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq != NULL) {
			ngbe_rx_queue_release_mbufs(rxq);
			ngbe_reset_rx_queue(adapter, rxq);
		}
	}
}

static int
ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
{
	struct ngbe_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned int i;

	/* Initialize software ring entries */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		/* the ring can also be modified by hardware */
		volatile struct ngbe_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
				     (unsigned int)rxq->queue_id,
				     (unsigned int)rxq->port_id);
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxd = &rxq->rx_ring[i];
		NGBE_RXD_HDRADDR(rxd, 0);
		NGBE_RXD_PKTADDR(rxd, dma_addr);
		rxe[i].mbuf = mbuf;
	}

	return 0;
}

/*
 * Initializes Receive Unit.
 */
int
ngbe_dev_rx_init(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_rx_queue *rxq;
	uint64_t bus_addr;
	uint32_t fctrl;
	uint32_t hlreg0;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/*
	 * Make sure receives are disabled while setting
	 * up the Rx context (registers, descriptor rings, etc.).
	 */
	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);

	/* Enable receipt of broadcasted frames */
	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl |= NGBE_PSRCTL_BCA;
	wr32(hw, NGBE_PSRCTL, fctrl);

	hlreg0 = rd32(hw, NGBE_SECRXCTL);
	hlreg0 &= ~NGBE_SECRXCTL_XDSA;
	wr32(hw, NGBE_SECRXCTL, hlreg0);

	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
	      NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));

	/* Setup Rx queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		/* Setup the Base and Length of the Rx Descriptor Rings */
		bus_addr = rxq->rx_ring_phys_addr;
		wr32(hw, NGBE_RXBAL(rxq->reg_idx),
		     (uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, NGBE_RXBAH(rxq->reg_idx),
		     (uint32_t)(bus_addr >> 32));
		wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
		wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);

		srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= NGBE_RXCFG_DROP;

		/*
		 * Configure the Rx buffer size in the PKTLEN field of
		 * the RXCFG register of the queue.
		 * The value is in 1 KB resolution. Valid values can be from
		 * 1 KB to 16 KB.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
		srrctl |= NGBE_RXCFG_PKTLEN(buf_size);

		wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
	}

	return 0;
}

/*
 * Initializes Transmit Unit.
 */
void
ngbe_dev_tx_init(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_tx_queue *txq;
	uint64_t bus_addr;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);

	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		bus_addr = txq->tx_ring_phys_addr;
		wr32(hw, NGBE_TXBAL(txq->reg_idx),
		     (uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, NGBE_TXBAH(txq->reg_idx),
		     (uint32_t)(bus_addr >> 32));
		wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
		      NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
		/* Setup the HW Tx Head and TX Tail descriptor pointers */
		wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
		wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
	}
}

/*
 * Start Transmit and Receive Units.
 */
int
ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_tx_queue *txq;
	struct ngbe_rx_queue *rxq;
	uint32_t dmatxctl;
	uint32_t rxctrl;
	uint16_t i;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Setup Transmit Threshold Registers */
		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
		      NGBE_TXCFG_HTHRESH_MASK |
		      NGBE_TXCFG_WTHRESH_MASK,
		      NGBE_TXCFG_HTHRESH(txq->hthresh) |
		      NGBE_TXCFG_WTHRESH(txq->wthresh));
	}

	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
	dmatxctl |= NGBE_DMATXCTRL_ENA;
	wr32(hw, NGBE_DMATXCTRL, dmatxctl);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start == 0) {
			ret = ngbe_dev_tx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start == 0) {
			ret = ngbe_dev_rx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	/* Enable Receive engine */
	rxctrl = rd32(hw, NGBE_PBRXCTL);
	rxctrl |= NGBE_PBRXCTL_ENA;
	hw->mac.enable_rx_dma(hw, rxctrl);

	return 0;
}

void
ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
	*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
	*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
}

void
ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
	wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
	wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
}

void
ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
}

void
ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
}

/*
 * Start Receive Units for specified queue.
 */
int
ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	/* Allocate buffers for descriptor rings */
	if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
			     rx_queue_id);
		return -1;
	}
	rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	rxdctl |= NGBE_RXCFG_ENA;
	wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);

	/* Wait until Rx Enable ready */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
	rte_wmb();
	wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
	wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int
ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
	wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);

	/* Wait until Rx Enable bit clear */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);

	rte_delay_us(RTE_NGBE_WAIT_100_US);
	ngbe_dev_store_rx_queue(hw, rxq->reg_idx);

	ngbe_rx_queue_release_mbufs(rxq);
	ngbe_reset_rx_queue(adapter, rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int
ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_tx_queue *txq;
	uint32_t txdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];
	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);

	/* Wait until Tx Enable ready */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
			     tx_queue_id);

	rte_wmb();
	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Transmit Units for specified queue.
 */
int
ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_tx_queue *txq;
	uint32_t txdctl;
	uint32_t txtdh, txtdt;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];

	/* Wait until Tx queue is empty */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_us(RTE_NGBE_WAIT_100_US);
		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
	} while (--poll_ms && (txtdh != txtdt));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
			     tx_queue_id);

	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);

	/* Wait until Tx Enable bit clear */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
			     tx_queue_id);

	rte_delay_us(RTE_NGBE_WAIT_100_US);
	ngbe_dev_store_tx_queue(hw, txq->reg_idx);

	if (txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->reset(txq);
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}