/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdalign.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rxtx.h"
#include "ionic_logs.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the clean up includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

/*
 * The queue stop path is split in two halves: the first half posts the
 * queue deinit command to the AdminQ without waiting, and the second half
 * waits for the command to complete and then empties the ring. This lets
 * callers post commands for several queues before waiting. The start path
 * and the RX queues follow the same convention.
 */
int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	ionic_dev_tx_queue_stop_firsthalf(dev, tx_queue_id);
	ionic_dev_tx_queue_stop_secondhalf(dev, tx_queue_id);

	return 0;
}

void __rte_cold
ionic_dev_tx_queue_stop_firsthalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_txq_deinit_nowait(txq);
}

void __rte_cold
ionic_dev_tx_queue_stop_secondhalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	ionic_adminq_wait(lif, &txq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	ionic_lif_txq_stats(txq);
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh cannot be greater than nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

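/*
 * For illustration only (not part of the driver): the setup hook above is
 * reached through the generic ethdev API. A minimal sketch, assuming a
 * placeholder port_id and illustrative values:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &txconf);
 *
 * nb_desc must be a power of two of at least IONIC_MIN_RING_DESC, and a
 * queue created with tx_deferred_start set is not started by
 * rte_eth_dev_start().
 */
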
/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	int err;

	err = ionic_dev_tx_queue_start_firsthalf(dev, tx_queue_id);
	if (err)
		return err;

	return ionic_dev_tx_queue_start_secondhalf(dev, tx_queue_id);
}

int __rte_cold
ionic_dev_tx_queue_start_firsthalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	return ionic_lif_txq_init_nowait(txq);
}

int __rte_cold
ionic_dev_tx_queue_start_secondhalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = dev->data->tx_queue_state;
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	err = ionic_adminq_wait(lif, &txq->admin_ctx);
	if (err)
		return err;

	ionic_lif_txq_init_done(txq);

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

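/*
 * ionic_tx_tso() walks the mbuf chain and emits one descriptor per TSO
 * segment. The first segment carries the headers plus mss bytes of payload
 * (seglen = hdrlen + mss); subsequent segments carry mss bytes each. When a
 * segment continues past the end of an mbuf and the queue supports
 * scatter-gather, the remainder of the segment is taken from the next mbuf
 * via SG elements on the same descriptor; otherwise a new descriptor is
 * started. The first and last descriptors are flagged TSO_SOT and TSO_EOT,
 * respectively, in ionic_tx_tso_post().
 */
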
int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	desc = ionic_tx_tso_next(txq, &elem);
	txm_seg = txm;
	start = true;
	seglen = hdrlen + mss;

	/* Walk the chain of mbufs */
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		/* Split the mbuf data up into multiple descriptors */
		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0 && use_sgl) {
				/* Fill previous descriptor's SGE */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Fill new descriptor's data field */
				len = RTE_MIN(seglen, left);
				frag_left = seglen - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;

			/* Pack the next mbuf's data into the descriptor */
			if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
				break;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
			seglen = mss;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \
	RTE_MBUF_F_TX_IPV6 | \
	RTE_MBUF_F_TX_VLAN | \
	RTE_MBUF_F_TX_IP_CKSUM | \
	RTE_MBUF_F_TX_TCP_SEG | \
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

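/*
 * For illustration only: this prep handler backs the ethdev Tx prepare
 * stage, so an application normally reaches it indirectly (port_id and
 * queue_id are placeholders):
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb_prep != nb_pkts)
 *		...inspect rte_errno (EINVAL or ENOTSUP, set above)...
 *	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */
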
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;
	rxq->wdog_ms = IONIC_Q_WDOG_MS;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter can keep the CRC,
	 * also account for ETHER_CRC_LEN and subtract it from the length
	 * of all received packets:
	 *   if (eth_dev->data->dev_conf.rxmode.offloads &
	 *       RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *           rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

#define IONIC_CSUM_FLAG_MASK	(IONIC_RXQ_COMP_CSUM_F_VLAN - 1)

const alignas(RTE_CACHE_LINE_SIZE) uint64_t
ionic_csum_flags[IONIC_CSUM_FLAG_MASK] = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
const alignas(RTE_CACHE_LINE_SIZE) uint32_t
ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK] = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

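/*
 * Usage sketch only: the actual lookups live in the Rx burst paths (e.g.
 * ionic_rxtx_simple.c / ionic_rxtx_sg.c). Roughly, fields of the Rx
 * completion descriptor index the two tables above:
 *
 *	rxm->packet_type =
 *		ionic_ptype_table[cq_desc->pkt_type_color &
 *				  IONIC_RXQ_COMP_PKT_TYPE_MASK];
 *	rxm->ol_flags |=
 *		ionic_csum_flags[cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK];
 */
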
const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
		size_t *no_of_elements)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
	};

	*no_of_elements = RTE_DIM(ptypes);
	return ptypes;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

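/*
 * For illustration: a queue configured with rx_deferred_start (flagged
 * IONIC_QCQ_F_DEFERRED in ionic_dev_rx_queue_setup()) is not started by
 * rte_eth_dev_start(); the application starts it explicitly, e.g. with
 * placeholder port_id/queue_id values:
 *
 *	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
 */
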
/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int err;

	err = ionic_dev_rx_queue_start_firsthalf(dev, rx_queue_id);
	if (err)
		return err;

	return ionic_dev_rx_queue_start_secondhalf(dev, rx_queue_id);
}

int __rte_cold
ionic_dev_rx_queue_start_firsthalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	return ionic_lif_rxq_init_nowait(rxq);
}

int __rte_cold
ionic_dev_rx_queue_start_secondhalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = dev->data->rx_queue_state;
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	err = ionic_adminq_wait(lif, &rxq->admin_ctx);
	if (err)
		return err;

	ionic_lif_rxq_init_done(rxq);

	/* Allocate buffers for descriptor ring */
	if (rxq->flags & IONIC_QCQ_F_SG)
		err = ionic_rx_fill_sg(rxq);
	else
		err = ionic_rx_fill(rxq);
	if (err != 0) {
		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	ionic_dev_rx_queue_stop_firsthalf(dev, rx_queue_id);
	ionic_dev_rx_queue_stop_secondhalf(dev, rx_queue_id);

	return 0;
}

void __rte_cold
ionic_dev_rx_queue_stop_firsthalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit_nowait(rxq);
}

void __rte_cold
ionic_dev_rx_queue_stop_secondhalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	ionic_adminq_wait(lif, &rxq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	ionic_lif_rxq_stats(rxq);
}

int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	volatile struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

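/*
 * For illustration: the descriptor status handlers in this file back the
 * generic ethdev descriptor status API. For example, an application can
 * check whether the Rx entry 'offset' positions past the next one to be
 * polled has completed (port_id/queue_id are placeholders):
 *
 *	if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
 *	    RTE_ETH_RX_DESC_DONE)
 *		...a received packet is waiting at that position...
 */
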
int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	volatile struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}