/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rxtx.h"
#include "ionic_logs.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the clean up includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	ionic_dev_tx_queue_stop_firsthalf(dev, tx_queue_id);
	ionic_dev_tx_queue_stop_secondhalf(dev, tx_queue_id);

	return 0;
}

void __rte_cold
ionic_dev_tx_queue_stop_firsthalf(struct rte_eth_dev *dev,
				uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_txq_deinit_nowait(txq);
}

void __rte_cold
ionic_dev_tx_queue_stop_secondhalf(struct rte_eth_dev *dev,
				uint16_t tx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	ionic_adminq_wait(lif, &txq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	ionic_lif_txq_stats(txq);
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh must not exceed nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	desc = ionic_tx_tso_next(txq, &elem);
	txm_seg = txm;
	start = true;
	seglen = hdrlen + mss;

	/* Walk the chain of mbufs */
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		/* Split the mbuf data up into multiple descriptors */
		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0 && use_sgl) {
				/* Fill previous descriptor's SGE */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Fill new descriptor's data field */
				len = RTE_MIN(seglen, left);
				frag_left = seglen - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;

			/* Pack the next mbuf's data into the descriptor */
			if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
				break;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
			seglen = mss;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |		\
	RTE_MBUF_F_TX_VLAN |		\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |		\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = -EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

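	/* Free the queue and its completion queue */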
	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;
	rxq->wdog_ms = IONIC_Q_WDOG_MS;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep
	 * the CRC, also account for ETHER_CRC_LEN and subtract it from
	 * the length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

#define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
			       size_t *no_of_elements)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
	};

	*no_of_elements = RTE_DIM(ptypes);
	return ptypes;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor ring */
	if (rxq->flags & IONIC_QCQ_F_SG)
		err = ionic_rx_fill_sg(rxq);
	else
		err = ionic_rx_fill(rxq);
	if (err != 0) {
		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	ionic_dev_rx_queue_stop_firsthalf(dev, rx_queue_id);
	ionic_dev_rx_queue_stop_secondhalf(dev, rx_queue_id);

	return 0;
}

void __rte_cold
ionic_dev_rx_queue_stop_firsthalf(struct rte_eth_dev *dev,
				uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit_nowait(rxq);
}

void __rte_cold
ionic_dev_rx_queue_stop_secondhalf(struct rte_eth_dev *dev,
				uint16_t rx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	ionic_adminq_wait(lif, &rxq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	ionic_lif_rxq_stats(rxq);
}

int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	volatile struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	volatile struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		/* completion color flips each time the cq index wraps */
		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}