/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <assert.h>
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the clean up includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
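
/*
 * Reclaim completed transmit descriptors.
 *
 * Completion entries carry a color bit; the color expected by software
 * (cq->done_color) flips each time the completion ring wraps, so an entry
 * is valid only while its color matches the expected one. Each completion's
 * comp_index says how far the transmit ring may advance, and the mbufs
 * stashed in the per-descriptor info array are returned to their pools
 * (directly via rte_mempool_put() when fast free is enabled, otherwise via
 * rte_pktmbuf_free_seg()).
 */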

static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_stats *stats;
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: we should ideally post a NOP Tx descriptor and wait for
	 * its completion before disabling the Tx queue
	 */

	ionic_lif_txq_deinit(txq);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	stats = &txq->stats;
	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	return 0;
}
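
/*
 * Set up a transmit queue. This is reached through the ethdev API; an
 * application-side call might look like the following (illustrative only,
 * the variable names and ring size are hypothetical):
 *
 *	struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), &txconf);
 *
 * tx_deferred_start maps to IONIC_QCQ_F_DEFERRED below; nb_desc must be
 * a power of two and at least IONIC_MIN_RING_DESC.
 */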

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh must not be greater than nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}
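
/*
 * Post a single TSO descriptor. A large send is split across several
 * descriptors: only the first carries IONIC_TXQ_DESC_FLAG_TSO_SOT and only
 * the last carries IONIC_TXQ_DESC_FLAG_TSO_EOT. The mbuf chain is stashed
 * in the info array only with the final descriptor, so the chain is freed
 * exactly once when that descriptor completes.
 */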

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
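
/*
 * Segment a TSO request into hardware descriptors.
 *
 * The first chunk covers the headers plus one MSS (seglen = hdrlen + mss);
 * every later chunk covers at most one MSS. With illustrative numbers only,
 * 54 bytes of l2+l3+l4 headers and tso_segsz = 1448 give a first chunk of
 * 1502 bytes and later chunks of up to 1448 bytes. Data that crosses mbuf
 * segment boundaries is described by SG elements attached to the current
 * descriptor.
 */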

static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
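
/*
 * Post one non-TSO packet. Checksum offload is requested in the descriptor
 * only when the mbuf ol_flags ask for it and the queue was configured with
 * the matching offload (IONIC_QCQ_F_CSUM_*); otherwise the packet goes out
 * with IONIC_TXQ_DESC_OPCODE_CSUM_NONE and is counted in stats->no_csum.
 * Additional mbuf segments are described by SG elements in the separate
 * SG descriptor ring.
 */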

static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	rte_iova_t data_iova;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc_base[q->head_idx].elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	struct ionic_txq_desc *desc_base = q->base;
	if (!(txq->flags & IONIC_QCQ_F_CMB))
		rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (unlikely(ionic_q_space_avail(q) < txq->free_thresh)) {
		/* Cleaning old buffers */
		ionic_tx_flush(txq);
	}

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		if (!(txq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	}

	return nb_tx;
}
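
/*
 * ionic_xmit_pkts() is installed as the burst transmit handler, so an
 * application reaches it through rte_eth_tx_burst(), e.g. (illustrative
 * only, the variable names are hypothetical):
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 */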

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |		\
	RTE_MBUF_F_TX_VLAN |		\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |		\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}
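
/*
 * Set up a receive queue. nb_desc must be a power of two between
 * IONIC_MIN_RING_DESC and IONIC_MAX_RING_DESC; receive buffers are drawn
 * from the mempool supplied by the application, and no-drop mode
 * (rx_drop_en == 0) is not supported.
 */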

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u, max: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC,
			IONIC_MAX_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC; please also consider RTE_ETHER_CRC_LEN
	 * once the adapter is able to keep the CRC, and subtract it from the
	 * length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = RTE_ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

#define IONIC_CSUM_FLAG_MASK	(IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};
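
/*
 * The tables above cover what the hardware classifies directly; frames it
 * does not classify (reported as non-IP/unknown) are examined in software
 * by ionic_rx_clean_one(), which is how ARP, LLDP and 1588/TIMESYNC end up
 * in the list advertised below.
 */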

const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;
	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
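
/*
 * Receive buffer replenishment pulls mbufs from a small per-queue cache
 * (rxq->mbs) that is refilled IONIC_MBUF_BULK_ALLOC at a time with
 * rte_mempool_get_bulk(); rxq->mb_idx counts down as buffers are handed
 * out, so a bulk get is only needed when the cache runs empty.
 */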

/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 */
static __rte_always_inline int
ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;
	int ret;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (unlikely(info[0]))
		return 0;

	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
			(void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		if (rxq->mb_idx == 0) {
			ret = rte_mempool_get_bulk(rxq->mb_pool,
				(void **)rxq->mbs,
				IONIC_MBUF_BULK_ALLOC);
			if (ret) {
				assert(0);
				return -ENOMEM;
			}

			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
		}

		rxm_seg = rxq->mbs[--rxq->mb_idx];
		info[i] = rxm_seg;

		/* The data_off does not get set to 0 until later */
		data_iova = rxm_seg->buf_iova;
		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
	}

	return 0;
}

/*
 * Fills all descriptors with mbufs.
 */
static int __rte_cold
ionic_rx_fill(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err;

	for (i = 1; i < q->num_descs; i++) {
		err = ionic_rx_fill_one(rxq);
		if (err)
			return err;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return 0;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}
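
/*
 * The per-packet segment count is derived from the frame size at queue
 * start: num_segs = 1 + (frame_size + RTE_PKTMBUF_HEADROOM - 1) / seg_size.
 * With illustrative numbers only (2048-byte segments, 128 bytes of headroom,
 * a 9000-byte frame): 1 + (9000 + 128 - 1) / 2048 = 5 segments.
 */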

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor ring */
	if (ionic_rx_fill(rxq) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		if (!(rxq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		ionic_rx_clean_one(rxq, cq_desc, rx_svc);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done)
		ionic_q_flush(q);
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_stats *stats;
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit(rxq);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	stats = &rxq->stats;
	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	return 0;
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}
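
/*
 * Unlike the RX case, one TX completion entry can cover many transmit
 * descriptors (comp_index reports the newest descriptor completed), so the
 * completion ring position does not map 1:1 to the queue position and must
 * be walked until an entry at or beyond the requested offset is found.
 */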

int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}