/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_prefetch.h>

#include "ionic.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	volatile struct ionic_txq_comp *cq_desc;
	void **info;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 2)]);

			/* Prefetch next mbuf */
			void **next_info =
				&q->info[Q_NEXT_TO_SRVC(q, 1)];
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);

			info = &q->info[q->tail_idx];
			{
				txm = info[0];

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[0] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
		stats->comps++;
	}
}

static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_tx_stats *stats = &txq->stats;
	void **info;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	if (txm->nb_segs > 1)
		return -EINVAL;

	desc = &desc_base[q->head_idx];
	info = &q->info[q->head_idx];

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, 0, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	q->head_idx = Q_NEXT_TO_POST(q, 1);

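	/* The doorbell is not rung here; ionic_xmit_pkts() flushes the queue once per burst */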
	return 0;
}

uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	uint64_t then, now, hz, delta;
	int err;

	rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(&q->info[q->head_idx]);

	if (nb_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (ionic_q_space_avail(q) < txq->free_thresh) {
		/* Clean old buffers */
		ionic_tx_flush(txq);
	}

	nb_avail = ionic_q_space_avail(q);
	if (nb_avail < nb_pkts) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(&q->info[next_idx]);

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		ionic_txq_flush(q);

		txq->last_wdog_cycles = rte_get_timer_cycles();

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	} else {
		/*
		 * Ring the doorbell again if no work could be posted and work
		 * is still pending after the deadline.
		 */
		if (q->head_idx != q->tail_idx) {
			then = txq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * IONIC_Q_WDOG_MS) {
				ionic_q_flush(q);
				txq->last_wdog_cycles = now;
			}
		}
	}

	return nb_tx;
}

/*
 * Cleans one descriptor and hands the completed mbuf to the receive service.
 * Does not advance the tail index.
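 * The caller (ionic_rxq_service) advances q->tail_idx after this returns.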
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		volatile struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = &q->info[q->tail_idx];

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = cq_desc_len;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

/*
 * Fills one descriptor with an mbuf. Does not advance the head index.
 */
static __rte_always_inline int
ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	rte_iova_t data_iova;
	void **info;
	int ret;

	info = &q->info[q->head_idx];
	desc = &desc_base[q->head_idx];

	/* mbuf is unused */
	if (info[0])
		return 0;

	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	return 0;
}

/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
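 * Stops after work_to_do completions or when the completion color no longer matches.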
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	volatile struct ionic_rxq_comp *cq_desc;
	uint32_t work_done = 0;
	uint64_t then, now, hz, delta;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 8)]);
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Clean one descriptor */
		ionic_rx_clean_one(rxq, cq_desc, rx_svc);
		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Fill one descriptor */
		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done) {
		ionic_rxq_flush(q);

		rxq->last_wdog_cycles = rte_get_timer_cycles();
		rxq->wdog_ms = IONIC_Q_WDOG_MS;
	} else {
		/*
		 * Ring the doorbell again if no recvs were posted and the
		 * recv queue is not empty after the deadline.
		 *
		 * Exponentially back off the deadline to avoid excessive
		 * doorbells when the recv queue is idle.
		 */
		if (q->head_idx != q->tail_idx) {
			then = rxq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * rxq->wdog_ms) {
				ionic_q_flush(q);
				rxq->last_wdog_cycles = now;

				delta = 2 * rxq->wdog_ms;
				if (delta > IONIC_Q_WDOG_MAX_MS)
					delta = IONIC_Q_WDOG_MAX_MS;

				rxq->wdog_ms = delta;
			}
		}
	}
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

/*
 * Fills all descriptors with mbufs.
 */
int __rte_cold
ionic_rx_fill(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err = 0;

	for (i = 0; i < q->num_descs - 1u; i++) {
		err = ionic_rx_fill_one(rxq);
		if (err)
			break;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_rxq_flush(q);

	return err;
}