/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_RXTX_VEC_COMMON_H_
#define _BNXT_RXTX_VEC_COMMON_H_
#include "hsi_struct_def_dpdk.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"

#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \
			  TX_BD_SHORT_FLAGS_COAL_NOW | \
			  TX_BD_SHORT_TYPE_TX_BD_SHORT | \
			  TX_BD_LONG_FLAGS_PACKET_END)

#define TX_BD_FLAGS_NOCMPL (TX_BD_FLAGS_CMPL | TX_BD_LONG_FLAGS_NO_CMPL)

/* Set the length hint field in the TX BD flags based on the packet length. */
static inline uint32_t
bnxt_xmit_flags_len(uint16_t len, uint16_t flags)
{
	switch (len >> 9) {
	case 0:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT512;
	case 1:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT1K;
	case 2:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT2K;
	case 3:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT2K;
	default:
		return flags | TX_BD_LONG_FLAGS_LHINT_GTE2K;
	}
}

/* Build the mbuf_initializer template applied to the rearm_data of RX mbufs. */
static inline int
bnxt_rxq_vec_setup_common(struct bnxt_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	rxq->rxrearm_nb = 0;
	rxq->rxrearm_start = 0;
	return 0;
}

/* Replenish the RX ring with newly allocated mbufs, four at a time. */
static inline void
bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
{
	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
	struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
	int nb, i;

	/*
	 * Number of mbufs to allocate must be a multiple of four. The
	 * allocation must not go past the end of the ring.
	 */
	nb = RTE_MIN(rxq->rxrearm_nb & ~0x3,
		     rxq->nb_rx_desc - rxq->rxrearm_start);

	/* Allocate new mbufs into the software ring. */
	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;

		for (i = 0; i < nb; i++)
			rx_bufs[i] = &rxq->fake_mbuf;
		return;
	}

	/* Initialize the mbufs in vector, process 4 mbufs per loop. */
	for (i = 0; i < nb; i += 4) {
		rxbds[0].address = rte_mbuf_data_iova_default(rx_bufs[0]);
		rxbds[1].address = rte_mbuf_data_iova_default(rx_bufs[1]);
		rxbds[2].address = rte_mbuf_data_iova_default(rx_bufs[2]);
		rxbds[3].address = rte_mbuf_data_iova_default(rx_bufs[3]);

		rxbds += 4;
		rx_bufs += 4;
	}

	rxq->rxrearm_start += nb;
	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= nb;
}

/*
 * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
 * is enabled.
 */
static inline void
bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, uint32_t nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t cons, raw_cons = txr->tx_raw_cons;
	uint32_t ring_mask, ring_size, num;
	struct rte_mempool *pool;

	ring_mask = txr->tx_ring_struct->ring_mask;
	ring_size = txr->tx_ring_struct->ring_size;

	/* Fast-free guarantees all mbufs come from the same pool, so free
	 * them with bulk puts, wrapping around the end of the ring if needed.
	 */
	cons = raw_cons & ring_mask;
	num = RTE_MIN(nr_pkts, ring_size - cons);
	pool = txr->tx_buf_ring[cons]->pool;

	rte_mempool_put_bulk(pool, (void **)&txr->tx_buf_ring[cons], num);
	memset(&txr->tx_buf_ring[cons], 0, num * sizeof(struct rte_mbuf *));
	raw_cons += num;
	num = nr_pkts - num;
	if (num) {
		cons = raw_cons & ring_mask;
		rte_mempool_put_bulk(pool, (void **)&txr->tx_buf_ring[cons],
				     num);
		memset(&txr->tx_buf_ring[cons], 0,
		       num * sizeof(struct rte_mbuf *));
		raw_cons += num;
	}

	txr->tx_raw_cons = raw_cons;
}

/* Transmit completion function for the general (non-fast-free) case. */
static inline void
bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, uint32_t nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t cons, raw_cons = txr->tx_raw_cons;
	uint32_t ring_mask, ring_size, num, blk;
	struct rte_mempool *pool;

	ring_mask = txr->tx_ring_struct->ring_mask;
	ring_size = txr->tx_ring_struct->ring_size;

	while (nr_pkts) {
		struct rte_mbuf *mbuf;

		cons = raw_cons & ring_mask;
		num = RTE_MIN(nr_pkts, ring_size - cons);
		pool = txr->tx_buf_ring[cons]->pool;

		/* Count a contiguous block of mbufs that can be returned to
		 * the same mempool with a single bulk put.
		 */
		blk = 0;
		do {
			mbuf = txr->tx_buf_ring[cons + blk];
			mbuf = rte_pktmbuf_prefree_seg(mbuf);
			if (!mbuf || mbuf->pool != pool)
				break;
			blk++;
		} while (blk < num);

		if (blk) {
			rte_mempool_put_bulk(pool,
					     (void **)&txr->tx_buf_ring[cons],
					     blk);
			memset(&txr->tx_buf_ring[cons], 0,
			       blk * sizeof(struct rte_mbuf *));
			raw_cons += blk;
			nr_pkts -= blk;
		}
		if (!mbuf) {
			/* Skip freeing mbufs with non-zero reference count. */
			raw_cons++;
			nr_pkts--;
		}
	}
	txr->tx_raw_cons = raw_cons;
}
#endif /* _BNXT_RXTX_VEC_COMMON_H_ */
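
/*
 * Illustrative sketch, not part of the driver: the per-architecture vector
 * burst implementations are expected to consume the helpers above roughly as
 * shown below. Variable names such as nb_tx_pkts are placeholders for this
 * example only.
 *
 *	// RX burst path: replenish the ring once enough descriptors
 *	// have been consumed.
 *	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
 *		bnxt_rxq_rearm(rxq, rxr);
 *
 *	// TX completion path: the fast variant is only safe when the
 *	// queue was configured with DEV_TX_OFFLOAD_MBUF_FAST_FREE.
 *	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
 *		bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 *	else
 *		bnxt_tx_cmp_vec(txq, nb_tx_pkts);
 */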