1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018-2022 Advanced Micro Devices, Inc. 3 */ 4 5 #include <sys/queue.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <string.h> 9 #include <errno.h> 10 #include <stdint.h> 11 #include <stdarg.h> 12 #include <unistd.h> 13 #include <inttypes.h> 14 15 #include <rte_byteorder.h> 16 #include <rte_common.h> 17 #include <rte_cycles.h> 18 #include <rte_log.h> 19 #include <rte_debug.h> 20 #include <rte_interrupts.h> 21 #include <rte_pci.h> 22 #include <rte_memory.h> 23 #include <rte_memzone.h> 24 #include <rte_launch.h> 25 #include <rte_eal.h> 26 #include <rte_per_lcore.h> 27 #include <rte_lcore.h> 28 #include <rte_atomic.h> 29 #include <rte_branch_prediction.h> 30 #include <rte_mempool.h> 31 #include <rte_malloc.h> 32 #include <rte_mbuf.h> 33 #include <rte_ether.h> 34 #include <ethdev_driver.h> 35 #include <rte_prefetch.h> 36 #include <rte_udp.h> 37 #include <rte_tcp.h> 38 #include <rte_sctp.h> 39 #include <rte_string_fns.h> 40 #include <rte_errno.h> 41 #include <rte_ip.h> 42 #include <rte_net.h> 43 44 #include "ionic_logs.h" 45 #include "ionic_mac_api.h" 46 #include "ionic_ethdev.h" 47 #include "ionic_lif.h" 48 #include "ionic_rxtx.h" 49 50 /********************************************************************* 51 * 52 * TX functions 53 * 54 **********************************************************************/ 55 56 void 57 ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 58 struct rte_eth_txq_info *qinfo) 59 { 60 struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id]; 61 struct ionic_queue *q = &txq->qcq.q; 62 63 qinfo->nb_desc = q->num_descs; 64 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads; 65 qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED; 66 } 67 68 static __rte_always_inline void 69 ionic_tx_flush(struct ionic_tx_qcq *txq) 70 { 71 struct ionic_cq *cq = &txq->qcq.cq; 72 struct ionic_queue *q = &txq->qcq.q; 73 struct rte_mbuf *txm, *next; 74 struct ionic_txq_comp *cq_desc_base = cq->base; 75 struct ionic_txq_comp *cq_desc; 76 void **info; 77 u_int32_t comp_index = (u_int32_t)-1; 78 79 cq_desc = &cq_desc_base[cq->tail_idx]; 80 while (color_match(cq_desc->color, cq->done_color)) { 81 cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1); 82 83 /* Prefetch the next 4 descriptors (not really useful here) */ 84 if ((cq->tail_idx & 0x3) == 0) 85 rte_prefetch0(&cq_desc_base[cq->tail_idx]); 86 87 if (cq->tail_idx == 0) 88 cq->done_color = !cq->done_color; 89 90 comp_index = cq_desc->comp_index; 91 92 cq_desc = &cq_desc_base[cq->tail_idx]; 93 } 94 95 if (comp_index != (u_int32_t)-1) { 96 while (q->tail_idx != comp_index) { 97 info = IONIC_INFO_PTR(q, q->tail_idx); 98 99 q->tail_idx = Q_NEXT_TO_SRVC(q, 1); 100 101 /* Prefetch the next 4 descriptors */ 102 if ((q->tail_idx & 0x3) == 0) 103 /* q desc info */ 104 rte_prefetch0(&q->info[q->tail_idx]); 105 106 /* 107 * Note: you can just use rte_pktmbuf_free, 108 * but this loop is faster 109 */ 110 txm = info[0]; 111 while (txm != NULL) { 112 next = txm->next; 113 rte_pktmbuf_free_seg(txm); 114 txm = next; 115 } 116 } 117 } 118 } 119 120 void __rte_cold 121 ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 122 { 123 struct ionic_tx_qcq *txq = dev->data->tx_queues[qid]; 124 struct ionic_tx_stats *stats = &txq->stats; 125 126 IONIC_PRINT_CALL(); 127 128 IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju", 129 txq->qcq.q.index, stats->packets, stats->tso); 130 131 ionic_lif_txq_deinit(txq); 132 133 ionic_qcq_free(&txq->qcq); 134 } 135 136 int __rte_cold 137 ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 138 { 139 struct ionic_tx_qcq *txq; 140 141 IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id); 142 143 txq = eth_dev->data->tx_queues[tx_queue_id]; 144 145 eth_dev->data->tx_queue_state[tx_queue_id] = 146 RTE_ETH_QUEUE_STATE_STOPPED; 147 148 /* 149 * Note: we should better post NOP Tx desc and wait for its completion 150 * before disabling Tx queue 151 */ 152 153 ionic_qcq_disable(&txq->qcq); 154 155 ionic_tx_flush(txq); 156 157 return 0; 158 } 159 160 int __rte_cold 161 ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, 162 uint16_t nb_desc, uint32_t socket_id, 163 const struct rte_eth_txconf *tx_conf) 164 { 165 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 166 struct ionic_tx_qcq *txq; 167 uint64_t offloads; 168 int err; 169 170 if (tx_queue_id >= lif->ntxqcqs) { 171 IONIC_PRINT(DEBUG, "Queue index %u not available " 172 "(max %u queues)", 173 tx_queue_id, lif->ntxqcqs); 174 return -EINVAL; 175 } 176 177 offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads; 178 IONIC_PRINT(DEBUG, 179 "Configuring skt %u TX queue %u with %u buffers, offloads %jx", 180 socket_id, tx_queue_id, nb_desc, offloads); 181 182 /* Validate number of receive descriptors */ 183 if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC) 184 return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ 185 186 /* Free memory prior to re-allocation if needed... */ 187 if (eth_dev->data->tx_queues[tx_queue_id] != NULL) { 188 ionic_dev_tx_queue_release(eth_dev, tx_queue_id); 189 eth_dev->data->tx_queues[tx_queue_id] = NULL; 190 } 191 192 eth_dev->data->tx_queue_state[tx_queue_id] = 193 RTE_ETH_QUEUE_STATE_STOPPED; 194 195 err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq); 196 if (err) { 197 IONIC_PRINT(DEBUG, "Queue allocation failure"); 198 return -EINVAL; 199 } 200 201 /* Do not start queue with rte_eth_dev_start() */ 202 if (tx_conf->tx_deferred_start) 203 txq->flags |= IONIC_QCQ_F_DEFERRED; 204 205 /* Convert the offload flags into queue flags */ 206 if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) 207 txq->flags |= IONIC_QCQ_F_CSUM_L3; 208 if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) 209 txq->flags |= IONIC_QCQ_F_CSUM_TCP; 210 if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) 211 txq->flags |= IONIC_QCQ_F_CSUM_UDP; 212 213 eth_dev->data->tx_queues[tx_queue_id] = txq; 214 215 return 0; 216 } 217 218 /* 219 * Start Transmit Units for specified queue. 220 */ 221 int __rte_cold 222 ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 223 { 224 uint8_t *tx_queue_state = eth_dev->data->tx_queue_state; 225 struct ionic_tx_qcq *txq; 226 int err; 227 228 if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 229 IONIC_PRINT(DEBUG, "TX queue %u already started", 230 tx_queue_id); 231 return 0; 232 } 233 234 txq = eth_dev->data->tx_queues[tx_queue_id]; 235 236 IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs", 237 tx_queue_id, txq->qcq.q.num_descs); 238 239 if (!(txq->flags & IONIC_QCQ_F_INITED)) { 240 err = ionic_lif_txq_init(txq); 241 if (err) 242 return err; 243 } else { 244 ionic_qcq_enable(&txq->qcq); 245 } 246 247 tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 248 249 return 0; 250 } 251 252 static void 253 ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm) 254 { 255 struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 256 char *l3_hdr = ((char *)eth_hdr) + txm->l2_len; 257 struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 258 (l3_hdr + txm->l3_len); 259 260 if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 261 struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 262 ipv4_hdr->hdr_checksum = 0; 263 tcp_hdr->cksum = 0; 264 tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 265 } else { 266 struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 267 tcp_hdr->cksum = 0; 268 tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 269 } 270 } 271 272 static void 273 ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm) 274 { 275 struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 276 char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len + 277 txm->outer_l3_len + txm->l2_len; 278 struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 279 (l3_hdr + txm->l3_len); 280 281 if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) { 282 struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 283 ipv4_hdr->hdr_checksum = 0; 284 tcp_hdr->cksum = 0; 285 tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 286 } else { 287 struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 288 tcp_hdr->cksum = 0; 289 tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 290 } 291 } 292 293 static void 294 ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, 295 struct rte_mbuf *txm, 296 rte_iova_t addr, uint8_t nsge, uint16_t len, 297 uint32_t hdrlen, uint32_t mss, 298 bool encap, 299 uint16_t vlan_tci, bool has_vlan, 300 bool start, bool done) 301 { 302 void **info; 303 uint64_t cmd; 304 uint8_t flags = 0; 305 flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; 306 flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; 307 flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; 308 flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; 309 310 cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, 311 flags, nsge, addr); 312 desc->cmd = rte_cpu_to_le_64(cmd); 313 desc->len = rte_cpu_to_le_16(len); 314 desc->vlan_tci = rte_cpu_to_le_16(vlan_tci); 315 desc->hdr_len = rte_cpu_to_le_16(hdrlen); 316 desc->mss = rte_cpu_to_le_16(mss); 317 318 if (done) { 319 info = IONIC_INFO_PTR(q, q->head_idx); 320 info[0] = txm; 321 } 322 323 q->head_idx = Q_NEXT_TO_POST(q, 1); 324 } 325 326 static struct ionic_txq_desc * 327 ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem) 328 { 329 struct ionic_queue *q = &txq->qcq.q; 330 struct ionic_txq_desc *desc_base = q->base; 331 struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base; 332 struct ionic_txq_desc *desc = &desc_base[q->head_idx]; 333 struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx]; 334 335 *elem = sg_desc->elems; 336 return desc; 337 } 338 339 static int 340 ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) 341 { 342 struct ionic_queue *q = &txq->qcq.q; 343 struct ionic_tx_stats *stats = &txq->stats; 344 struct ionic_txq_desc *desc; 345 struct ionic_txq_sg_elem *elem; 346 struct rte_mbuf *txm_seg; 347 rte_iova_t data_iova; 348 uint64_t desc_addr = 0, next_addr; 349 uint16_t desc_len = 0; 350 uint8_t desc_nsge; 351 uint32_t hdrlen; 352 uint32_t mss = txm->tso_segsz; 353 uint32_t frag_left = 0; 354 uint32_t left; 355 uint32_t seglen; 356 uint32_t len; 357 uint32_t offset = 0; 358 bool start, done; 359 bool encap; 360 bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN); 361 uint16_t vlan_tci = txm->vlan_tci; 362 uint64_t ol_flags = txm->ol_flags; 363 364 encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) || 365 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) && 366 ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) || 367 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)); 368 369 /* Preload inner-most TCP csum field with IP pseudo hdr 370 * calculated with IP length set to zero. HW will later 371 * add in length to each TCP segment resulting from the TSO. 372 */ 373 374 if (encap) { 375 ionic_tx_tcp_inner_pseudo_csum(txm); 376 hdrlen = txm->outer_l2_len + txm->outer_l3_len + 377 txm->l2_len + txm->l3_len + txm->l4_len; 378 } else { 379 ionic_tx_tcp_pseudo_csum(txm); 380 hdrlen = txm->l2_len + txm->l3_len + txm->l4_len; 381 } 382 383 seglen = hdrlen + mss; 384 left = txm->data_len; 385 data_iova = rte_mbuf_data_iova(txm); 386 387 desc = ionic_tx_tso_next(txq, &elem); 388 start = true; 389 390 /* Chop data up into desc segments */ 391 392 while (left > 0) { 393 len = RTE_MIN(seglen, left); 394 frag_left = seglen - len; 395 desc_addr = rte_cpu_to_le_64(data_iova + offset); 396 desc_len = len; 397 desc_nsge = 0; 398 left -= len; 399 offset += len; 400 if (txm->nb_segs > 1 && frag_left > 0) 401 continue; 402 done = (txm->nb_segs == 1 && left == 0); 403 ionic_tx_tso_post(q, desc, txm, 404 desc_addr, desc_nsge, desc_len, 405 hdrlen, mss, 406 encap, 407 vlan_tci, has_vlan, 408 start, done); 409 desc = ionic_tx_tso_next(txq, &elem); 410 start = false; 411 seglen = mss; 412 } 413 414 /* Chop frags into desc segments */ 415 416 txm_seg = txm->next; 417 while (txm_seg != NULL) { 418 offset = 0; 419 data_iova = rte_mbuf_data_iova(txm_seg); 420 left = txm_seg->data_len; 421 422 while (left > 0) { 423 next_addr = rte_cpu_to_le_64(data_iova + offset); 424 if (frag_left > 0) { 425 len = RTE_MIN(frag_left, left); 426 frag_left -= len; 427 elem->addr = next_addr; 428 elem->len = rte_cpu_to_le_16(len); 429 elem++; 430 desc_nsge++; 431 } else { 432 len = RTE_MIN(mss, left); 433 frag_left = mss - len; 434 desc_addr = next_addr; 435 desc_len = len; 436 desc_nsge = 0; 437 } 438 left -= len; 439 offset += len; 440 if (txm_seg->next != NULL && frag_left > 0) 441 continue; 442 443 done = (txm_seg->next == NULL && left == 0); 444 ionic_tx_tso_post(q, desc, txm_seg, 445 desc_addr, desc_nsge, desc_len, 446 hdrlen, mss, 447 encap, 448 vlan_tci, has_vlan, 449 start, done); 450 desc = ionic_tx_tso_next(txq, &elem); 451 start = false; 452 } 453 454 txm_seg = txm_seg->next; 455 } 456 457 stats->tso++; 458 459 return 0; 460 } 461 462 static __rte_always_inline int 463 ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) 464 { 465 struct ionic_queue *q = &txq->qcq.q; 466 struct ionic_txq_desc *desc, *desc_base = q->base; 467 struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base; 468 struct ionic_txq_sg_elem *elem; 469 struct ionic_tx_stats *stats = &txq->stats; 470 struct rte_mbuf *txm_seg; 471 void **info; 472 bool encap; 473 bool has_vlan; 474 uint64_t ol_flags = txm->ol_flags; 475 uint64_t addr, cmd; 476 uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE; 477 uint8_t flags = 0; 478 479 desc = &desc_base[q->head_idx]; 480 info = IONIC_INFO_PTR(q, q->head_idx); 481 482 if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && 483 (txq->flags & IONIC_QCQ_F_CSUM_L3)) { 484 opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; 485 flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3; 486 } 487 488 if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) && 489 (txq->flags & IONIC_QCQ_F_CSUM_TCP)) || 490 ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) && 491 (txq->flags & IONIC_QCQ_F_CSUM_UDP))) { 492 opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; 493 flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4; 494 } 495 496 if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE) 497 stats->no_csum++; 498 499 has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN); 500 encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) || 501 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) && 502 ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) || 503 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)); 504 505 flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; 506 flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; 507 508 addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm)); 509 510 cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); 511 desc->cmd = rte_cpu_to_le_64(cmd); 512 desc->len = rte_cpu_to_le_16(txm->data_len); 513 desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci); 514 515 info[0] = txm; 516 517 elem = sg_desc_base[q->head_idx].elems; 518 519 txm_seg = txm->next; 520 while (txm_seg != NULL) { 521 elem->len = rte_cpu_to_le_16(txm_seg->data_len); 522 elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg)); 523 elem++; 524 txm_seg = txm_seg->next; 525 } 526 527 q->head_idx = Q_NEXT_TO_POST(q, 1); 528 529 return 0; 530 } 531 532 uint16_t 533 ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 534 uint16_t nb_pkts) 535 { 536 struct ionic_tx_qcq *txq = tx_queue; 537 struct ionic_queue *q = &txq->qcq.q; 538 struct ionic_tx_stats *stats = &txq->stats; 539 uint32_t next_q_head_idx; 540 uint32_t bytes_tx = 0; 541 uint16_t nb_avail, nb_tx = 0; 542 int err; 543 544 /* Cleaning old buffers */ 545 ionic_tx_flush(txq); 546 547 nb_avail = ionic_q_space_avail(q); 548 if (unlikely(nb_avail < nb_pkts)) { 549 stats->stop += nb_pkts - nb_avail; 550 nb_pkts = nb_avail; 551 } 552 553 while (nb_tx < nb_pkts) { 554 next_q_head_idx = Q_NEXT_TO_POST(q, 1); 555 if ((next_q_head_idx & 0x3) == 0) { 556 struct ionic_txq_desc *desc_base = q->base; 557 rte_prefetch0(&desc_base[next_q_head_idx]); 558 rte_prefetch0(&q->info[next_q_head_idx]); 559 } 560 561 if (tx_pkts[nb_tx]->ol_flags & RTE_MBUF_F_TX_TCP_SEG) 562 err = ionic_tx_tso(txq, tx_pkts[nb_tx]); 563 else 564 err = ionic_tx(txq, tx_pkts[nb_tx]); 565 if (err) { 566 stats->drop += nb_pkts - nb_tx; 567 break; 568 } 569 570 bytes_tx += tx_pkts[nb_tx]->pkt_len; 571 nb_tx++; 572 } 573 574 if (nb_tx > 0) { 575 rte_wmb(); 576 ionic_q_flush(q); 577 } 578 579 stats->packets += nb_tx; 580 stats->bytes += bytes_tx; 581 582 return nb_tx; 583 } 584 585 /********************************************************************* 586 * 587 * TX prep functions 588 * 589 **********************************************************************/ 590 591 #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \ 592 RTE_MBUF_F_TX_IPV6 | \ 593 RTE_MBUF_F_TX_VLAN | \ 594 RTE_MBUF_F_TX_IP_CKSUM | \ 595 RTE_MBUF_F_TX_TCP_SEG | \ 596 RTE_MBUF_F_TX_L4_MASK) 597 598 #define IONIC_TX_OFFLOAD_NOTSUP_MASK \ 599 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK) 600 601 uint16_t 602 ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 603 { 604 struct ionic_tx_qcq *txq = tx_queue; 605 struct rte_mbuf *txm; 606 uint64_t offloads; 607 int i = 0; 608 609 for (i = 0; i < nb_pkts; i++) { 610 txm = tx_pkts[i]; 611 612 if (txm->nb_segs > txq->num_segs_fw) { 613 rte_errno = -EINVAL; 614 break; 615 } 616 617 offloads = txm->ol_flags; 618 619 if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) { 620 rte_errno = -ENOTSUP; 621 break; 622 } 623 } 624 625 return i; 626 } 627 628 /********************************************************************* 629 * 630 * RX functions 631 * 632 **********************************************************************/ 633 634 static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, 635 struct rte_mbuf *mbuf); 636 637 void 638 ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 639 struct rte_eth_rxq_info *qinfo) 640 { 641 struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id]; 642 struct ionic_queue *q = &rxq->qcq.q; 643 644 qinfo->mp = rxq->mb_pool; 645 qinfo->scattered_rx = dev->data->scattered_rx; 646 qinfo->nb_desc = q->num_descs; 647 qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED; 648 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 649 } 650 651 static void __rte_cold 652 ionic_rx_empty(struct ionic_rx_qcq *rxq) 653 { 654 struct ionic_queue *q = &rxq->qcq.q; 655 struct rte_mbuf *mbuf; 656 void **info; 657 658 while (q->tail_idx != q->head_idx) { 659 info = IONIC_INFO_PTR(q, q->tail_idx); 660 mbuf = info[0]; 661 rte_mempool_put(rxq->mb_pool, mbuf); 662 663 q->tail_idx = Q_NEXT_TO_SRVC(q, 1); 664 } 665 } 666 667 void __rte_cold 668 ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 669 { 670 struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid]; 671 struct ionic_rx_stats *stats; 672 673 if (!rxq) 674 return; 675 676 IONIC_PRINT_CALL(); 677 678 stats = &rxq->stats; 679 680 IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju", 681 rxq->qcq.q.index, stats->packets, stats->mtods); 682 683 ionic_rx_empty(rxq); 684 685 ionic_lif_rxq_deinit(rxq); 686 687 ionic_qcq_free(&rxq->qcq); 688 } 689 690 int __rte_cold 691 ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, 692 uint16_t rx_queue_id, 693 uint16_t nb_desc, 694 uint32_t socket_id, 695 const struct rte_eth_rxconf *rx_conf, 696 struct rte_mempool *mp) 697 { 698 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 699 struct ionic_rx_qcq *rxq; 700 uint64_t offloads; 701 int err; 702 703 if (rx_queue_id >= lif->nrxqcqs) { 704 IONIC_PRINT(ERR, 705 "Queue index %u not available (max %u queues)", 706 rx_queue_id, lif->nrxqcqs); 707 return -EINVAL; 708 } 709 710 offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads; 711 IONIC_PRINT(DEBUG, 712 "Configuring skt %u RX queue %u with %u buffers, offloads %jx", 713 socket_id, rx_queue_id, nb_desc, offloads); 714 715 if (!rx_conf->rx_drop_en) 716 IONIC_PRINT(WARNING, "No-drop mode is not supported"); 717 718 /* Validate number of receive descriptors */ 719 if (!rte_is_power_of_2(nb_desc) || 720 nb_desc < IONIC_MIN_RING_DESC || 721 nb_desc > IONIC_MAX_RING_DESC) { 722 IONIC_PRINT(ERR, 723 "Bad descriptor count (%u) for queue %u (min: %u)", 724 nb_desc, rx_queue_id, IONIC_MIN_RING_DESC); 725 return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ 726 } 727 728 /* Free memory prior to re-allocation if needed... */ 729 if (eth_dev->data->rx_queues[rx_queue_id] != NULL) { 730 ionic_dev_rx_queue_release(eth_dev, rx_queue_id); 731 eth_dev->data->rx_queues[rx_queue_id] = NULL; 732 } 733 734 eth_dev->data->rx_queue_state[rx_queue_id] = 735 RTE_ETH_QUEUE_STATE_STOPPED; 736 737 err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, 738 &rxq); 739 if (err) { 740 IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id); 741 return -EINVAL; 742 } 743 744 rxq->mb_pool = mp; 745 746 /* 747 * Note: the interface does not currently support 748 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN 749 * when the adapter will be able to keep the CRC and subtract 750 * it to the length for all received packets: 751 * if (eth_dev->data->dev_conf.rxmode.offloads & 752 * RTE_ETH_RX_OFFLOAD_KEEP_CRC) 753 * rxq->crc_len = ETHER_CRC_LEN; 754 */ 755 756 /* Do not start queue with rte_eth_dev_start() */ 757 if (rx_conf->rx_deferred_start) 758 rxq->flags |= IONIC_QCQ_F_DEFERRED; 759 760 eth_dev->data->rx_queues[rx_queue_id] = rxq; 761 762 return 0; 763 } 764 765 static __rte_always_inline void 766 ionic_rx_clean(struct ionic_rx_qcq *rxq, 767 uint32_t q_desc_index, uint32_t cq_desc_index, 768 struct ionic_rx_service *rx_svc) 769 { 770 struct ionic_queue *q = &rxq->qcq.q; 771 struct ionic_cq *cq = &rxq->qcq.cq; 772 struct ionic_rxq_comp *cq_desc_base = cq->base; 773 struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index]; 774 struct rte_mbuf *rxm, *rxm_seg; 775 uint64_t pkt_flags = 0; 776 uint32_t pkt_type; 777 struct ionic_rx_stats *stats = &rxq->stats; 778 uint32_t buf_size = (uint16_t) 779 (rte_pktmbuf_data_room_size(rxq->mb_pool) - 780 RTE_PKTMBUF_HEADROOM); 781 uint32_t left; 782 void **info; 783 784 assert(q_desc_index == cq_desc->comp_index); 785 786 info = IONIC_INFO_PTR(q, cq_desc->comp_index); 787 788 rxm = info[0]; 789 790 if (!rx_svc) { 791 stats->no_cb_arg++; 792 /* Flush */ 793 rte_pktmbuf_free(rxm); 794 /* 795 * Note: rte_mempool_put is faster with no segs 796 * rte_mempool_put(rxq->mb_pool, rxm); 797 */ 798 return; 799 } 800 801 if (cq_desc->status) { 802 stats->bad_cq_status++; 803 ionic_rx_recycle(q, q_desc_index, rxm); 804 return; 805 } 806 807 if (rx_svc->nb_rx >= rx_svc->nb_pkts) { 808 stats->no_room++; 809 ionic_rx_recycle(q, q_desc_index, rxm); 810 return; 811 } 812 813 if (cq_desc->len > rxq->frame_size || cq_desc->len == 0) { 814 stats->bad_len++; 815 ionic_rx_recycle(q, q_desc_index, rxm); 816 return; 817 } 818 819 rxm->data_off = RTE_PKTMBUF_HEADROOM; 820 rte_prefetch1((char *)rxm->buf_addr + rxm->data_off); 821 rxm->nb_segs = 1; /* cq_desc->num_sg_elems */ 822 rxm->pkt_len = cq_desc->len; 823 rxm->port = rxq->qcq.lif->port_id; 824 825 left = cq_desc->len; 826 827 rxm->data_len = RTE_MIN(buf_size, left); 828 left -= rxm->data_len; 829 830 rxm_seg = rxm->next; 831 while (rxm_seg && left) { 832 rxm_seg->data_len = RTE_MIN(buf_size, left); 833 left -= rxm_seg->data_len; 834 835 rxm_seg = rxm_seg->next; 836 rxm->nb_segs++; 837 } 838 839 /* RSS */ 840 pkt_flags |= RTE_MBUF_F_RX_RSS_HASH; 841 rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash); 842 843 /* Vlan Strip */ 844 if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) { 845 pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 846 rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci); 847 } 848 849 /* Checksum */ 850 if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { 851 if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK) 852 pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 853 else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD) 854 pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 855 856 if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) || 857 (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK)) 858 pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 859 else if ((cq_desc->csum_flags & 860 IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || 861 (cq_desc->csum_flags & 862 IONIC_RXQ_COMP_CSUM_F_UDP_BAD)) 863 pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 864 } 865 866 rxm->ol_flags = pkt_flags; 867 868 /* Packet Type */ 869 switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { 870 case IONIC_PKT_TYPE_IPV4: 871 pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; 872 break; 873 case IONIC_PKT_TYPE_IPV6: 874 pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; 875 break; 876 case IONIC_PKT_TYPE_IPV4_TCP: 877 pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 878 RTE_PTYPE_L4_TCP; 879 break; 880 case IONIC_PKT_TYPE_IPV6_TCP: 881 pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 882 RTE_PTYPE_L4_TCP; 883 break; 884 case IONIC_PKT_TYPE_IPV4_UDP: 885 pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 886 RTE_PTYPE_L4_UDP; 887 break; 888 case IONIC_PKT_TYPE_IPV6_UDP: 889 pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 890 RTE_PTYPE_L4_UDP; 891 break; 892 default: 893 { 894 struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm, 895 struct rte_ether_hdr *); 896 uint16_t ether_type = eth_h->ether_type; 897 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) 898 pkt_type = RTE_PTYPE_L2_ETHER_ARP; 899 else 900 pkt_type = RTE_PTYPE_UNKNOWN; 901 stats->mtods++; 902 break; 903 } 904 } 905 906 rxm->packet_type = pkt_type; 907 908 rx_svc->rx_pkts[rx_svc->nb_rx] = rxm; 909 rx_svc->nb_rx++; 910 911 stats->packets++; 912 stats->bytes += rxm->pkt_len; 913 } 914 915 static void 916 ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, 917 struct rte_mbuf *mbuf) 918 { 919 struct ionic_rxq_desc *desc_base = q->base; 920 struct ionic_rxq_desc *old = &desc_base[q_desc_index]; 921 struct ionic_rxq_desc *new = &desc_base[q->head_idx]; 922 923 new->addr = old->addr; 924 new->len = old->len; 925 926 q->info[q->head_idx] = mbuf; 927 928 q->head_idx = Q_NEXT_TO_POST(q, 1); 929 930 ionic_q_flush(q); 931 } 932 933 static __rte_always_inline int 934 ionic_rx_fill(struct ionic_rx_qcq *rxq) 935 { 936 struct ionic_queue *q = &rxq->qcq.q; 937 struct ionic_rxq_desc *desc, *desc_base = q->base; 938 struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base; 939 struct ionic_rxq_sg_elem *elem; 940 void **info; 941 rte_iova_t dma_addr; 942 uint32_t i, j, nsegs, buf_size, size; 943 944 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 945 RTE_PKTMBUF_HEADROOM); 946 947 /* Initialize software ring entries */ 948 for (i = ionic_q_space_avail(q); i; i--) { 949 struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool); 950 struct rte_mbuf *prev_rxm_seg; 951 952 if (rxm == NULL) { 953 IONIC_PRINT(ERR, "RX mbuf alloc failed"); 954 return -ENOMEM; 955 } 956 957 info = IONIC_INFO_PTR(q, q->head_idx); 958 959 nsegs = (rxq->frame_size + buf_size - 1) / buf_size; 960 961 desc = &desc_base[q->head_idx]; 962 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm)); 963 desc->addr = dma_addr; 964 desc->len = buf_size; 965 size = buf_size; 966 desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG : 967 IONIC_RXQ_DESC_OPCODE_SIMPLE; 968 rxm->next = NULL; 969 970 prev_rxm_seg = rxm; 971 sg_desc = &sg_desc_base[q->head_idx]; 972 elem = sg_desc->elems; 973 for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) { 974 struct rte_mbuf *rxm_seg; 975 rte_iova_t data_iova; 976 977 rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool); 978 if (rxm_seg == NULL) { 979 IONIC_PRINT(ERR, "RX mbuf alloc failed"); 980 return -ENOMEM; 981 } 982 983 data_iova = rte_mbuf_data_iova(rxm_seg); 984 dma_addr = rte_cpu_to_le_64(data_iova); 985 elem->addr = dma_addr; 986 elem->len = buf_size; 987 size += buf_size; 988 elem++; 989 rxm_seg->next = NULL; 990 prev_rxm_seg->next = rxm_seg; 991 prev_rxm_seg = rxm_seg; 992 } 993 994 if (size < rxq->frame_size) 995 IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)", 996 size, rxq->frame_size); 997 998 info[0] = rxm; 999 1000 q->head_idx = Q_NEXT_TO_POST(q, 1); 1001 } 1002 1003 ionic_q_flush(q); 1004 1005 return 0; 1006 } 1007 1008 /* 1009 * Start Receive Units for specified queue. 1010 */ 1011 int __rte_cold 1012 ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1013 { 1014 uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 1015 struct ionic_rx_qcq *rxq; 1016 int err; 1017 1018 if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 1019 IONIC_PRINT(DEBUG, "RX queue %u already started", 1020 rx_queue_id); 1021 return 0; 1022 } 1023 1024 rxq = eth_dev->data->rx_queues[rx_queue_id]; 1025 1026 rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN; 1027 1028 IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u", 1029 rx_queue_id, rxq->qcq.q.num_descs, rxq->frame_size); 1030 1031 if (!(rxq->flags & IONIC_QCQ_F_INITED)) { 1032 err = ionic_lif_rxq_init(rxq); 1033 if (err) 1034 return err; 1035 } else { 1036 ionic_qcq_enable(&rxq->qcq); 1037 } 1038 1039 /* Allocate buffers for descriptor rings */ 1040 if (ionic_rx_fill(rxq) != 0) { 1041 IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d", 1042 rx_queue_id); 1043 return -1; 1044 } 1045 1046 rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 1047 1048 return 0; 1049 } 1050 1051 static __rte_always_inline void 1052 ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do, 1053 struct ionic_rx_service *rx_svc) 1054 { 1055 struct ionic_cq *cq = &rxq->qcq.cq; 1056 struct ionic_queue *q = &rxq->qcq.q; 1057 struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; 1058 bool more; 1059 uint32_t curr_q_tail_idx, curr_cq_tail_idx; 1060 uint32_t work_done = 0; 1061 1062 if (work_to_do == 0) 1063 return; 1064 1065 cq_desc = &cq_desc_base[cq->tail_idx]; 1066 while (color_match(cq_desc->pkt_type_color, cq->done_color)) { 1067 curr_cq_tail_idx = cq->tail_idx; 1068 cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1); 1069 1070 if (cq->tail_idx == 0) 1071 cq->done_color = !cq->done_color; 1072 1073 /* Prefetch the next 4 descriptors */ 1074 if ((cq->tail_idx & 0x3) == 0) 1075 rte_prefetch0(&cq_desc_base[cq->tail_idx]); 1076 1077 do { 1078 more = (q->tail_idx != cq_desc->comp_index); 1079 1080 curr_q_tail_idx = q->tail_idx; 1081 q->tail_idx = Q_NEXT_TO_SRVC(q, 1); 1082 1083 /* Prefetch the next 4 descriptors */ 1084 if ((q->tail_idx & 0x3) == 0) 1085 /* q desc info */ 1086 rte_prefetch0(&q->info[q->tail_idx]); 1087 1088 ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx, 1089 rx_svc); 1090 1091 } while (more); 1092 1093 if (++work_done == work_to_do) 1094 break; 1095 1096 cq_desc = &cq_desc_base[cq->tail_idx]; 1097 } 1098 } 1099 1100 /* 1101 * Stop Receive Units for specified queue. 1102 */ 1103 int __rte_cold 1104 ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1105 { 1106 struct ionic_rx_qcq *rxq; 1107 1108 IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id); 1109 1110 rxq = eth_dev->data->rx_queues[rx_queue_id]; 1111 1112 eth_dev->data->rx_queue_state[rx_queue_id] = 1113 RTE_ETH_QUEUE_STATE_STOPPED; 1114 1115 ionic_qcq_disable(&rxq->qcq); 1116 1117 /* Flush */ 1118 ionic_rxq_service(rxq, -1, NULL); 1119 1120 return 0; 1121 } 1122 1123 uint16_t 1124 ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 1125 uint16_t nb_pkts) 1126 { 1127 struct ionic_rx_qcq *rxq = rx_queue; 1128 struct ionic_rx_service rx_svc; 1129 1130 rx_svc.rx_pkts = rx_pkts; 1131 rx_svc.nb_pkts = nb_pkts; 1132 rx_svc.nb_rx = 0; 1133 1134 ionic_rxq_service(rxq, nb_pkts, &rx_svc); 1135 1136 ionic_rx_fill(rxq); 1137 1138 return rx_svc.nb_rx; 1139 } 1140