/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < rx_queue->nb_desc; i++) {
				if (sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * must not exceed the maximum supported by the hardware.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;
	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* CRC stripping in AXGBE is configured per port, not per queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);
	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

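/*
 * Illustrative usage (not part of this driver): the setup function above
 * is reached through the generic ethdev API.  The port, queue and pool
 * names here are assumptions for the sketch; nb_desc must satisfy the
 * power-of-2 check above.
 *
 *	struct rte_mempool *pool;	// from rte_pktmbuf_pool_create()
 *	int ret;
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *				     rte_eth_dev_socket_id(port_id),
 *				     NULL, pool);	// NULL = default rxconf
 */
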
static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		axgbe_prepare_rx_stop(pdata, i);
	}
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}

	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* Frames are forwarded to the application after CRC stripping */
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

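/*
 * Receive path overview (applies to both burst functions below): once a
 * completed write-back descriptor is found (OWN bit cleared), a
 * replacement mbuf is allocated before the received buffer is handed to
 * the application, and the descriptor is re-armed with the replacement
 * buffer address and its OWN bit set.  After the loop, a single write to
 * the DMA tail register, ordered by rte_wmb(), returns all refilled
 * descriptors to the hardware in one shot.
 */
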
/* Rx function: one descriptor per packet, refilled one-to-one */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;
	uint64_t offloads;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status ==
					    AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= PKT_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Indicate if a Context Descriptor is next */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
			mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
					| PKT_RX_IEEE1588_TMST;
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Mbuf populate */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}

	return nb_rx;
}

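/*
 * Scattered receive: a packet larger than the mbuf data room spans
 * several descriptors.  Descriptors with LD (last descriptor) clear
 * carry rxq->buf_size bytes and are chained onto first_seg with
 * rte_pktmbuf_chain(); only when LD is set is the completed chain
 * returned to the application, with the CRC length subtracted from the
 * final segment.
 */
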
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;

	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
	uint64_t offloads;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		bool eop = 0;
next_desc:
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = 0;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = 1;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
						     RX_NORMAL_DESC3, PL);
			data_len = pkt_len - rxq->crc_len;
		}

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
				rte_mempool_put(rxq->mb_pool,
						first_seg);
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= PKT_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Mbuf populate */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;

err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;

		if (!eop) {
			rte_pktmbuf_free(mbuf);
			goto next_desc;
		}

		first_seg->pkt_len = pkt_len;
		rxq->bytes += pkt_len;
		mbuf->next = NULL;

		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status ==
					    AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Set up receive context for a new packet */
		first_seg = NULL;
	}

	/* Save receive context */
	rxq->pkts += nb_rx;

	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}
	return nb_rx;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < tx_queue->nb_desc; i++) {
				if (sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}

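/*
 * Tx queue setup also selects the burst function: the vector path
 * (x86 only, requiring at least 128-bit SIMD from
 * rte_vect_get_max_simd_bitwidth()) is installed only when no Tx
 * offloads are requested and the ring size is a multiple of the free
 * threshold; otherwise the scalar axgbe_xmit_pkts() path is used.
 */
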
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;
	uint64_t offloads;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2, no
	 * larger than the hardware maximum and no smaller than the driver
	 * minimum.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;
	offloads = tx_conf->offloads |
		txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* For the vector Tx path the ring size must be a multiple of the
	 * free threshold.
	 */
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	if (offloads != 0)
		txq->vector_disable = 1;

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	if (txq->vector_disable ||
	    rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
	else
#ifdef RTE_ARCH_X86
		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

	return 0;
}

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			     char *fw_version, size_t fw_size)
{
	struct axgbe_port *pdata;
	struct axgbe_hw_features *hw_feat;
	int ret;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	hw_feat = &pdata->hw_feat;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state.  Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare for stopping DMA channel */
	for (i = 0; i < pdata->tx_q_count; i++) {
		txq = dev->data->tx_queues[i];
		axgbe_prepare_tx_stop(pdata, i);
	}
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					0);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

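/*
 * Tx ring accounting: txq->cur counts descriptors handed to the
 * hardware and txq->dirty counts descriptors reclaimed; both are
 * free-running values masked into ring indexes with
 * AXGBE_GET_DESC_IDX().  The cleanup below walks from dirty towards cur
 * and stops at the first descriptor the hardware still owns (OWN set).
 */
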
/* Free mbufs of Tx descriptors that the hardware has completed */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Tx descriptor formation.
 * Assumes each mbuf requires exactly one descriptor and that the mbuf
 * is linear (single segment).
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Update buffer address and length */
	desc->baddr = rte_mbuf_data_iova(mbuf);
	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			   mbuf->pkt_len);
	/* Total msg length to transmit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
			   mbuf->pkt_len);
	/* Timestamp enablement check */
	if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
	rte_wmb();
	/* Mark it as First and Last Descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	/* Configure h/w checksum offload */
	mask = mbuf->ol_flags & PKT_TX_L4_MASK;
	if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

	if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				   TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}
	rte_wmb();

	/* Set OWN bit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
	rte_wmb();

	/* Save mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update current index */
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}

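/*
 * Illustrative usage (not part of this driver): once dev->tx_pkt_burst
 * points at the wrapper below, an application transmit loop looks
 * roughly like this; names are assumptions for the sketch.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_prep;	// mbufs filled by the application
 *	uint16_t nb_sent;
 *
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *	// mbufs not accepted (nb_sent < nb_prep) stay owned by the caller
 */
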
/* Ethdev-facing Tx burst wrapper */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update tail reg with next immediate address to kick Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}
}

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
	desc = &txq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}