/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_prefetch.h>

#include "ionic.h"
#include "ionic_if.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

static __rte_always_inline void
ionic_tx_flush_sg(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

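/*
 * Posts one non-TSO packet: fills the Tx descriptor and one SG element per
 * additional mbuf segment, and stashes the mbuf pointers for later cleanup.
 * Advances the head index but does not ring the doorbell.
 */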
static __rte_always_inline int
ionic_tx_sg(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	void **info;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc->elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

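/*
 * Scatter-gather transmit burst handler.
 * Cleans up completed descriptors when free space runs below the threshold,
 * posts as many packets as will fit, then rings the doorbell once.
 */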
uint16_t
ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	uint64_t then, now, hz, delta;
	int err;

	struct ionic_txq_desc *desc_base = q->base;
	if (!(txq->flags & IONIC_QCQ_F_CMB))
		rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (ionic_q_space_avail(q) < txq->free_thresh) {
		/* Cleaning old buffers */
		ionic_tx_flush_sg(txq);
	}

	nb_avail = ionic_q_space_avail(q);
	if (nb_avail < nb_pkts) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		if (!(txq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx_sg(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);

		txq->last_wdog_cycles = rte_get_timer_cycles();

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	} else {
		/*
		 * Ring the doorbell again if no work could be posted and work
		 * is still pending after the deadline.
		 */
		if (q->head_idx != q->tail_idx) {
			then = txq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * IONIC_Q_WDOG_MS) {
				ionic_q_flush(q);
				txq->last_wdog_cycles = now;
			}
		}
	}

	return nb_tx;
}

/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 */
static __rte_always_inline void
ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct rte_mbuf *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;

	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 */
static __rte_always_inline int
ionic_rx_fill_one_sg(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct rte_mbuf *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;
	int ret;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (info[0])
		return 0;

	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		if (rxq->mb_idx == 0) {
			ret = rte_mempool_get_bulk(rxq->mb_pool,
						(void **)rxq->mbs,
						IONIC_MBUF_BULK_ALLOC);
			if (ret) {
				assert(0);
				return -ENOMEM;
			}

			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
		}

		rxm_seg = rxq->mbs[--rxq->mb_idx];
		info[i] = rxm_seg;

		/* The data_off does not get set to 0 until later */
		data_iova = rxm_seg->buf_iova;
		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
	}

	return 0;
}

/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 */
static __rte_always_inline void
ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;
	uint64_t then, now, hz, delta;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		if (!(rxq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Clean one descriptor */
		ionic_rx_clean_one_sg(rxq, cq_desc, rx_svc);
		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Fill one descriptor */
		(void)ionic_rx_fill_one_sg(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done) {
		ionic_q_flush(q);
		rxq->last_wdog_cycles = rte_get_timer_cycles();
		rxq->wdog_ms = IONIC_Q_WDOG_MS;
	} else {
		/*
		 * Ring the doorbell again if no recvs were posted and the
		 * recv queue is not empty after the deadline.
		 *
		 * Exponentially back off the deadline to avoid excessive
		 * doorbells when the recv queue is idle.
		 */
		if (q->head_idx != q->tail_idx) {
			then = rxq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * rxq->wdog_ms) {
				ionic_q_flush(q);
				rxq->last_wdog_cycles = now;

				delta = 2 * rxq->wdog_ms;
				if (delta > IONIC_Q_WDOG_MAX_MS)
					delta = IONIC_Q_WDOG_MAX_MS;

				rxq->wdog_ms = delta;
			}
		}
	}
}

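/*
 * Scatter-gather receive burst handler.
 * Services up to nb_pkts completions, refilling each descriptor as it is
 * consumed.
 */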
uint16_t
ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service_sg(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

/*
 * Fills all descriptors with mbufs.
 */
int __rte_cold
ionic_rx_fill_sg(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err = 0;

	for (i = 0; i < q->num_descs - 1u; i++) {
		err = ionic_rx_fill_one_sg(rxq);
		if (err)
			break;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return err;
}