/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < rx_queue->nb_desc; i++) {
				rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * no larger than what the hardware supports.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;
	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* CRC stripping in AXGBE is a per-port setting, not per-queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
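
	/*
	 * Rx free threshold: use the driver default when the application
	 * passes 0, and clamp an oversized request to one eighth of the
	 * ring (e.g. nb_desc = 512 with rx_free_thresh = 4096 yields an
	 * effective threshold of 64).
	 */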
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG_LINE(ERR, "ring_dma_zone_reserve for rx_ring failed");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);
	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG_LINE(ERR, "rte_zmalloc for sw_ring failed");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG_LINE(ERR,
				 "timed out waiting for Rx queue %u to empty",
				 queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		axgbe_prepare_rx_stop(pdata, i);
	}
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}
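
	/*
	 * MAC_RQC0R carries a 2-bit enable field per Rx queue; the loop
	 * below writes 0x2 into each field (the "enabled" encoding used by
	 * the Linux axgbe driver for generic/DCB queues).  For example,
	 * with pdata->rx_q_count = 4 the register value becomes
	 * 0x02 | (0x02 << 2) | (0x02 << 4) | (0x02 << 6) = 0xAA.
	 */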
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* Frames are forwarded to the application after CRC stripping */
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
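
/*
 * Ring accounting used by the burst functions below: 'cur' and 'dirty' are
 * free-running counters, and AXGBE_GET_DESC_IDX() (defined in axgbe_rxtx.h)
 * reduces them to a slot index - presumably a simple mask, since descriptor
 * counts are validated to be powers of two at queue setup.  A descriptor
 * belongs to hardware while its OWN bit is set and returns to the driver
 * once hardware clears it.
 */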

/* Rx function: one-to-one descriptor refresh */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
					 " queue_id = %u",
					 (unsigned int)rxq->port_id,
					 (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free the mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Indicate if a Context Descriptor is next */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
					| RTE_MBUF_F_RX_IEEE1588_TMST;
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Populate the mbuf */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
					       (idx * sizeof(union axgbe_rx_desc))));
	}

	return nb_rx;
}
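
/*
 * Scattered Rx: a frame larger than one Rx buffer arrives as several
 * descriptors.  Non-last descriptors carry rxq->buf_size bytes each; the
 * descriptor with the LD bit set carries the remainder and the total frame
 * length.  Segments are chained with rte_pktmbuf_chain(), and a partially
 * assembled chain is parked in rxq->saved_mbuf so it can be completed on
 * the next burst call.
 */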

uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
				       struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;

	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err = 0, etlt;
	uint32_t error_status = 0;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
	bool eop = 0;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);

	while (nb_rx < nb_pkts) {
next_desc:
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
					 " queue_id = %u",
					 (unsigned int)rxq->port_id,
					 (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = 0;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = 1;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
						     RX_NORMAL_DESC3, PL) - rxq->crc_len;
			data_len = pkt_len % rxq->buf_size;
			/* Check for any errors and free the mbuf */
			err = AXGMAC_GET_BITS_LE(desc->write.desc3,
						 RX_NORMAL_DESC3, ES);
			error_status = 0;
			if (unlikely(err)) {
				error_status = desc->write.desc3 &
					AXGBE_ERR_STATUS;
				if (error_status != AXGBE_L3_CSUM_ERR &&
				    error_status != AXGBE_L4_CSUM_ERR) {
					rxq->errors++;
					rte_pktmbuf_free(mbuf);
					rte_pktmbuf_free(first_seg);
					first_seg = NULL;
					eop = 0;
					goto err_set;
				}
			}
		}
		/* Populate the mbuf */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;
		mbuf->pkt_len = data_len;

		if (rxq->saved_mbuf) {
			first_seg = rxq->saved_mbuf;
			rxq->saved_mbuf = NULL;
		}

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0) {
				rte_pktmbuf_free(first_seg);
				first_seg = NULL;
				rte_pktmbuf_free(mbuf);
				rxq->saved_mbuf = NULL;
				rxq->errors++;
				eop = 0;
				break;
			}
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			first_seg->hash.rss =
				rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN;
				first_seg->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					first_seg->ol_flags |=
						RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					first_seg->ol_flags &=
						~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				first_seg->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN |
					  RTE_MBUF_F_RX_VLAN_STRIPPED);
				first_seg->vlan_tci = 0;
			}
		}

err_set:
		rxq->cur++;
		rxq->sw_ring[idx] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);

		if (!eop)
			goto next_desc;
		eop = 0;

		rxq->bytes += pkt_len;

		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			first_seg->ol_flags = 0;
			first_seg->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_IP_CKSUM_BAD;
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status ==
					    AXGBE_L4_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Set up receive context for a new packet */
		first_seg = NULL;
	}

	/* Check if we need to save state before leaving */
	if (first_seg != NULL && eop == 0)
		rxq->saved_mbuf = first_seg;

	/* Save receive context */
	rxq->pkts += nb_rx;

	if (rxq->dirty != rxq->cur) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
					       (idx * sizeof(union axgbe_rx_desc))));
		rxq->dirty = rxq->cur;
	}
	return nb_rx;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < tx_queue->nb_desc; i++) {
				rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}
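
/*
 * Tx ring sizing: the requested descriptor count must be a power of two,
 * at least AXGBE_MIN_RING_DESC and no more than the hardware limit.  The
 * free threshold defaults to AXGBE_TX_FREE_THRESH and is clamped to half
 * the ring; the vector Tx path is disabled whenever the ring size is not
 * a multiple of the threshold or any Tx offload is requested.
 */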

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;
	uint64_t offloads;
	struct rte_eth_dev_data *dev_data = dev->data;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2, no
	 * larger than what the hardware supports and at least the driver
	 * minimum.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the Tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;
	offloads = tx_conf->offloads |
		dev->data->dev_conf.txmode.offloads;
	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* In the vector Tx path the queue size must be a multiple of the threshold */
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	if (offloads != 0)
		txq->vector_disable = 1;

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->offloads = offloads;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	if ((dev_data->dev_conf.txmode.offloads &
	     RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
		pdata->multi_segs_tx = true;

	return 0;
}

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			     char *fw_version, size_t fw_size)
{
	struct axgbe_port *pdata;
	struct axgbe_hw_features *hw_feat;
	int ret;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	hw_feat = &pdata->hw_feat;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
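
/*
 * Illustrative use through the generic ethdev API (not part of this file):
 *
 *	char fw[32];
 *	int rc = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 * rc == 0 means 'fw' now holds the "<user>.<device>.<Synopsys>" version
 * string; a positive rc is the buffer size that would have been required,
 * including the terminating '\0', matching the convention implemented above.
 */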

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG_LINE(ERR,
				 "timed out waiting for Tx queue %u to empty",
				 queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG_LINE(ERR,
				 "timed out waiting for Tx DMA channel %u to stop",
				 queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare for stopping DMA channel */
	for (i = 0; i < pdata->tx_q_count; i++) {
		txq = dev->data->tx_queues[i];
		axgbe_prepare_tx_stop(pdata, i);
	}
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					0);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
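
/*
 * Tx completion sweep: descriptors between 'dirty' and 'cur' have been
 * handed to hardware.  Once hardware clears the OWN bit they can be
 * reclaimed; the sweep stops at the first descriptor still owned by
 * hardware.  The _seg variant frees individual segments (multi-segment
 * path), while the plain variant frees whole mbuf chains.
 */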

/* Free mbuf segments for Tx descriptors completed by hardware */
static void
axgbe_xmit_cleanup_seg(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free_seg(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Free mbufs for Tx descriptors completed by hardware */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Tx descriptor formation
 * Assumes the mbuf is linear, so each mbuf needs exactly one descriptor
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Update buffer address and length */
	desc->baddr = rte_mbuf_data_iova(mbuf);
	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			   mbuf->pkt_len);
	/* Total msg length to transmit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
			   mbuf->pkt_len);
	/* Timestamp enablement check */
	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
	rte_wmb();
	/* Mark it as First and Last Descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	/* Configure h/w checksum offload */
	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				   TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}
	rte_wmb();

	/* Set OWN bit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
	rte_wmb();

	/* Save mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update current index */
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}
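
/*
 * Segmented Tx layout (as implemented below): the first descriptor carries
 * FD, the total frame length and the first segment's buffer; each following
 * segment gets its own descriptor with just a buffer address and length;
 * LD is set on the last descriptor.  The OWN bit of the first descriptor is
 * written only after the whole chain has been built, so the DMA never sees
 * a partially formed packet.
 */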

/* Tx descriptor formation for a segmented (multi-mbuf) packet:
 * each mbuf segment consumes one descriptor
 */
static int
axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
		  struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;
	int start_index;
	uint32_t pkt_len = 0;
	int nb_desc_free;
	struct rte_mbuf *tx_pkt;

	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (mbuf->nb_segs > nb_desc_free) {
		axgbe_xmit_cleanup_seg(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(mbuf->nb_segs > nb_desc_free))
			return RTE_ETH_TX_DESC_UNAVAIL;
	}

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];
	/* Save the start index so the OWN bit can be set on it last */
	start_index = idx;

	tx_pkt = mbuf;
	/* Max pkt len = 9018; needs updating according to jumbo pkt size */
	pkt_len = tx_pkt->pkt_len;

	/* Update buffer address and length */
	desc->baddr = rte_mbuf_data_iova(tx_pkt);
	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			   tx_pkt->data_len);
	/* Total msg length to transmit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
			   tx_pkt->pkt_len);
	/* Timestamp enablement check */
	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	rte_wmb();
	/* Mark it as First Descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	/* Configure h/w checksum offload */
	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				   TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}
	rte_wmb();

	/* Save mbuf */
	txq->sw_ring[idx] = tx_pkt;
	/* Update current index */
	txq->cur++;

	tx_pkt = tx_pkt->next;

	while (tx_pkt != NULL) {
		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
		desc = &txq->desc[idx];

		/* Update buffer address and length */
		desc->baddr = rte_mbuf_data_iova(tx_pkt);

		AXGMAC_SET_BITS_LE(desc->desc2,
				   TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);

		rte_wmb();

		/* Mark it as a NORMAL descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
		/* Configure h/w checksum offload */
		mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
		if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
		    mask == RTE_MBUF_F_TX_UDP_CKSUM)
			AXGMAC_SET_BITS_LE(desc->desc3,
					   TX_NORMAL_DESC3, CIC, 0x3);
		else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			AXGMAC_SET_BITS_LE(desc->desc3,
					   TX_NORMAL_DESC3, CIC, 0x1);

		rte_wmb();

		/* Set OWN bit */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		rte_wmb();

		/* Save mbuf */
		txq->sw_ring[idx] = tx_pkt;
		/* Update current index */
		txq->cur++;

		tx_pkt = tx_pkt->next;
	}

	/* Set LD bit for the last descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	rte_wmb();

	/* Update stats */
	txq->bytes += pkt_len;

	/* Set OWN bit for the first descriptor */
	desc = &txq->desc[start_index];
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
	rte_wmb();

	return 0;
}
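
/*
 * Both Tx burst wrappers below finish with the same doorbell sequence:
 * rte_mb() makes the descriptor writes visible before the MMIO write, and
 * DMA_CH_TDTR_LO is then loaded with the low 32 bits of the address of the
 * next unused descriptor (txq->cur), which tells the DMA engine how far it
 * may fetch.
 */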

/* Tx burst entry point - segmented (multi-segment mbuf) path */
uint16_t
axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf = NULL;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	txq = (struct axgbe_tx_queue *)tx_queue;

	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup_seg(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}

	while (nb_pkts--) {
		mbuf = *tx_pkts++;

		if (axgbe_xmit_hw_seg(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update tail reg with next immediate address to kick Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

/* Tx burst entry point - single-segment (linear mbuf) path */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update tail reg with next immediate address to kick Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}
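
/*
 * Descriptor status helpers backing rte_eth_rx/tx_descriptor_status():
 * the offset is first checked against the number of in-flight descriptors
 * (UNAVAIL beyond that), and the OWN bit of the addressed descriptor then
 * decides between DONE and AVAIL (Rx) or DONE and FULL (Tx).
 */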

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
	desc = &txq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}