/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_prefetch.h>

#include "ionic.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

static __rte_always_inline void
ionic_tx_flush_sg(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	volatile struct ionic_txq_comp *cq_desc;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
		stats->comps++;
	}
}

static __rte_always_inline int
ionic_tx_sg(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	void **info;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
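		/* Request VLAN tag insertion using the TCI from the mbuf */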
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc->elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

uint16_t
ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	uint64_t then, now, hz, delta;
	int err;

	rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (nb_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (ionic_q_space_avail(q) < txq->free_thresh) {
		/* Cleaning old buffers */
		ionic_tx_flush_sg(txq);
	}

	nb_avail = ionic_q_space_avail(q);
	if (nb_avail < nb_pkts) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx_sg(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_txq_flush(q);

		txq->last_wdog_cycles = rte_get_timer_cycles();

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	} else {
		/*
		 * Ring the doorbell again if no work could be posted and work
		 * is still pending after the deadline.
		 */
		if (q->head_idx != q->tail_idx) {
			then = txq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * IONIC_Q_WDOG_MS) {
				ionic_q_flush(q);
				txq->last_wdog_cycles = now;
			}
		}
	}

	return nb_tx;
}

/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
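 * The caller advances the queue tail index afterwards. On a bad completion
 * status or length, only an error counter is incremented and the mbufs are
 * left in place to be reused.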
 */
static __rte_always_inline void
ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq,
		volatile struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct rte_mbuf *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;

	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

/*
 * Fills one descriptor with mbufs. Does not advance the head index.
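 * Mbufs come from the per-queue bulk allocation cache (rxq->mbs), refilled
 * IONIC_MBUF_BULK_ALLOC entries at a time from the mempool; the caller
 * advances the queue head index afterwards.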
 */
static __rte_always_inline int
ionic_rx_fill_one_sg(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct rte_mbuf *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;
	int ret;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (info[0])
		return 0;

	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		if (rxq->mb_idx == 0) {
			ret = rte_mempool_get_bulk(rxq->mb_pool,
						(void **)rxq->mbs,
						IONIC_MBUF_BULK_ALLOC);
			if (ret) {
				assert(0);
				return -ENOMEM;
			}

			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
		}

		rxm_seg = rxq->mbs[--rxq->mb_idx];
		info[i] = rxm_seg;

		/* The data_off does not get set to 0 until later */
		data_iova = rxm_seg->buf_iova;
		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
	}

	return 0;
}

/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 */
static __rte_always_inline void
ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	volatile struct ionic_rxq_comp *cq_desc;
	uint32_t work_done = 0;
	uint64_t then, now, hz, delta;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Clean one descriptor */
		ionic_rx_clean_one_sg(rxq, cq_desc, rx_svc);
		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Fill one descriptor */
		(void)ionic_rx_fill_one_sg(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done) {
		ionic_rxq_flush(q);

		rxq->last_wdog_cycles = rte_get_timer_cycles();
		rxq->wdog_ms = IONIC_Q_WDOG_MS;
	} else {
		/*
		 * Ring the doorbell again if no recvs were posted and the
		 * recv queue is not empty after the deadline.
		 *
		 * Exponentially back off the deadline to avoid excessive
		 * doorbells when the recv queue is idle.
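		 *
		 * The elapsed-time check below compares
		 * (now - then) * 1000 >= hz * wdog_ms, i.e. at least
		 * wdog_ms milliseconds of timer cycles have passed, without
		 * needing a division. Each time the deadline expires it is
		 * doubled, up to IONIC_Q_WDOG_MAX_MS, and it resets to
		 * IONIC_Q_WDOG_MS once work is done again.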
		 */
		if (q->head_idx != q->tail_idx) {
			then = rxq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * rxq->wdog_ms) {
				ionic_q_flush(q);
				rxq->last_wdog_cycles = now;

				delta = 2 * rxq->wdog_ms;
				if (delta > IONIC_Q_WDOG_MAX_MS)
					delta = IONIC_Q_WDOG_MAX_MS;

				rxq->wdog_ms = delta;
			}
		}
	}
}

uint16_t
ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service_sg(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

/*
 * Fills all descriptors with mbufs.
 */
int __rte_cold
ionic_rx_fill_sg(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err = 0;

	for (i = 0; i < q->num_descs - 1u; i++) {
		err = ionic_rx_fill_one_sg(rxq);
		if (err)
			break;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_rxq_flush(q);

	return err;
}
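
/*
 * Usage sketch (illustrative, not part of this file): the scatter-gather
 * burst handlers above are assumed to be installed on the ethdev during
 * queue setup when the SG datapath is selected, e.g.:
 *
 *	eth_dev->rx_pkt_burst = ionic_recv_pkts_sg;
 *	eth_dev->tx_pkt_burst = ionic_xmit_pkts_sg;
 */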