/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_prefetch.h>

#include "ionic.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

/*
 * Walk the Tx completion ring and release every mbuf whose descriptor the
 * hardware has completed.  A completion entry is "done" when its color bit
 * matches cq->done_color; each completion may cover a batch of descriptors,
 * up to and including comp_index.
 */
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	volatile struct ionic_txq_comp *cq_desc;
	void **info;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		/* CQ wrapped: the expected color flips on every pass */
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		/* Free all mbufs covered by this completion entry */
		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 2)]);

			/* Prefetch next mbuf */
			void **next_info =
				&q->info[Q_NEXT_TO_SRVC(q, 1)];
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);

			info = &q->info[q->tail_idx];
			{
				txm = info[0];

				/*
				 * Fast-free: return the mbuf straight to its
				 * pool; otherwise do the full per-segment free.
				 */
				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[0] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
		stats->comps++;
	}
}

/*
 * Post one single-segment mbuf to the Tx ring at q->head_idx.
 * Fills in checksum-offload, encap, and VLAN flags from the mbuf's ol_flags
 * (gated by the queue's capability flags), then advances the head index.
 *
 * Returns 0 on success, or -EINVAL for a multi-segment mbuf, which this
 * simple path does not handle (one descriptor maps to one data buffer).
 * Does not ring the doorbell; the caller batches that.
 */
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_tx_stats *stats = &txq->stats;
	void **info;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	/* Simple path: single segment only */
	if (txm->nb_segs > 1)
		return -EINVAL;

	desc = &desc_base[q->head_idx];
	info = &q->info[q->head_idx];

	/* L3 checksum offload, only if the queue supports it */
	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	/* L4 (TCP/UDP) checksum offload, only if the queue supports it */
	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	/* Tunneled packet: outer checksum requested and an outer IP header */
	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	/* VLAN tag insertion */
	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, 0, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	/* Remember the mbuf so the completion path can free it */
	info[0] = txm;

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

/*
 * Tx burst entry point (simple path).
 *
 * Reclaims completed descriptors when free space drops below free_thresh,
 * posts up to nb_pkts mbufs (TSO packets are routed to ionic_tx_tso(),
 * everything else to the single-segment ionic_tx()), then rings the
 * doorbell once for the whole batch.
 *
 * Returns the number of packets actually posted; ownership of any
 * unposted mbufs remains with the caller, per rte_eth_tx_burst semantics.
 */
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	uint64_t then, now, hz, delta;
	int err;

	rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(&q->info[q->head_idx]);

	if (nb_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (ionic_q_space_avail(q) < txq->free_thresh) {
		/* Cleaning old buffers */
		ionic_tx_flush(txq);
	}

	/* Clamp the burst to the ring space; count the excess as stops */
	nb_avail = ionic_q_space_avail(q);
	if (nb_avail < nb_pkts) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(&q->info[next_idx]);

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			/* Remaining packets are returned unsent */
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		/* Descriptor writes must be visible before the doorbell */
		rte_wmb();
		ionic_txq_flush(q);

		txq->last_wdog_cycles = rte_get_timer_cycles();

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	} else {
		/*
		 * Ring the doorbell again if no work could be posted and work
		 * is still pending after the deadline.
		 */
		if (q->head_idx != q->tail_idx) {
			then = txq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			/* Scale by 1000 so the compare works in ms without
			 * dividing: delta_cycles * 1000 >= hz * ms
			 */
			delta = (now - then) * 1000;

			if (delta >= hz * IONIC_Q_WDOG_MS) {
				ionic_q_flush(q);
				txq->last_wdog_cycles = now;
			}
		}
	}

	return nb_tx;
}

/*
 * Cleans one descriptor: validates the completion, fills in the mbuf
 * metadata (length, RSS, VLAN, checksum, packet type) from the cq entry,
 * and appends the mbuf to rx_svc. Does not advance the tail index.
 *
 * On a bad status or bad length the mbuf is left in place (info[0] stays
 * set), so the refill path will re-post the same buffer.
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		volatile struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = &q->info[q->tail_idx];

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	/* Take ownership of the mbuf; the slot will be refilled */
	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = cq_desc_len;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		/*
		 * HW could not classify the packet; peek at the Ethernet
		 * header to recognize a few L2-only protocols.
		 */
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 *
 * Mbufs are drawn from a per-queue bulk-allocated cache (rxq->mbs),
 * replenished IONIC_MBUF_BULK_ALLOC at a time. A slot whose info[0] is
 * still set (error-path recycle) is re-posted as-is.
 */
static __rte_always_inline int
ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	rte_iova_t data_iova;
	void **info;
	int ret;

	info = &q->info[q->head_idx];
	desc = &desc_base[q->head_idx];

	/* mbuf is unused */
	if (info[0])
		return 0;

	/* Refill the bulk cache when it runs dry */
	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	return 0;
}

/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 *
 * Processes at most work_to_do completions, then rings the Rx doorbell
 * once for the whole batch. When idle, falls back to a watchdog doorbell
 * with exponential backoff on the deadline.
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	volatile struct ionic_rxq_comp *cq_desc;
	uint32_t work_done = 0;
	uint64_t then, now, hz, delta;

	cq_desc = &cq_desc_base[cq->tail_idx];

	/* Rx completions carry the color bit inside pkt_type_color */
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		/* CQ wrapped: the expected color flips on every pass */
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 8)]);
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Clean one descriptor */
		ionic_rx_clean_one(rxq, cq_desc, rx_svc);
		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Fill one descriptor (failure leaves the slot for later) */
		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done) {
		ionic_rxq_flush(q);

		txq_unused_marker: ; /* no-op label removed */
		rxq->last_wdog_cycles = rte_get_timer_cycles();
		rxq->wdog_ms = IONIC_Q_WDOG_MS;
	} else {
		/*
		 * Ring the doorbell again if no recvs were posted and the
		 * recv queue is not empty after the deadline.
		 *
		 * Exponentially back off the deadline to avoid excessive
		 * doorbells when the recv queue is idle.
		 */
		if (q->head_idx != q->tail_idx) {
			then = rxq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			/* delta_cycles * 1000 >= hz * ms, avoiding division */
			delta = (now - then) * 1000;

			if (delta >= hz * rxq->wdog_ms) {
				ionic_q_flush(q);
				rxq->last_wdog_cycles = now;

				/* Double the deadline, capped at the max */
				delta = 2 * rxq->wdog_ms;
				if (delta > IONIC_Q_WDOG_MAX_MS)
					delta = IONIC_Q_WDOG_MAX_MS;

				rxq->wdog_ms = delta;
			}
		}
	}
}

/*
 * Rx burst entry point (simple path): service up to nb_pkts completions
 * into rx_pkts and return the number of packets received.
 */
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

/*
 * Fills all descriptors with mbufs.
 *
 * Posts num_descs - 1 buffers; one slot is deliberately left unposted
 * (NOTE(review): presumably so a full ring is distinguishable from an
 * empty one — head == tail means empty; confirm against queue macros).
 * Rings the doorbell even on partial fill so posted buffers are usable.
 */
int __rte_cold
ionic_rx_fill(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err = 0;

	for (i = 0; i < q->num_descs - 1u; i++) {
		err = ionic_rx_fill_one(rxq);
		if (err)
			break;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_rxq_flush(q);

	return err;
}