/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2023 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_RXTX_VEC_COMMON_H_
#define _BNXT_RXTX_VEC_COMMON_H_
#include "hsi_struct_def_dpdk.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"

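/*
 * Transmit BD flag templates for the vector Tx path: a BD count of one,
 * the COAL_NOW flag, the short BD type, and the packet-end marker. The
 * NOCMPL variant additionally sets NO_CMPL to suppress the per-packet
 * completion record.
 */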
#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \
                          TX_BD_SHORT_FLAGS_COAL_NOW | \
                          TX_BD_SHORT_TYPE_TX_BD_SHORT | \
                          TX_BD_LONG_FLAGS_PACKET_END)

#define TX_BD_FLAGS_NOCMPL (TX_BD_FLAGS_CMPL | TX_BD_LONG_FLAGS_NO_CMPL)

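/*
 * Fold the transmit length hint into the BD flags. Shifting the length
 * right by 9 (i.e. dividing by 512) selects the hint bucket: <512, <1K,
 * <2K (buckets 2 and 3) or >=2K bytes. A 1400-byte frame, for example,
 * falls in bucket 2 and gets TX_BD_LONG_FLAGS_LHINT_LT2K.
 */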
static inline uint32_t
bnxt_xmit_flags_len(uint16_t len, uint16_t flags)
{
        switch (len >> 9) {
        case 0:
                return flags | TX_BD_LONG_FLAGS_LHINT_LT512;
        case 1:
                return flags | TX_BD_LONG_FLAGS_LHINT_LT1K;
        case 2:
                return flags | TX_BD_LONG_FLAGS_LHINT_LT2K;
        case 3:
                return flags | TX_BD_LONG_FLAGS_LHINT_LT2K;
        default:
                return flags | TX_BD_LONG_FLAGS_LHINT_GTE2K;
        }
}

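/*
 * Build the 64-bit mbuf_initializer template used by the vector Rx paths.
 * The rearm_data region of the template mbuf spans data_off, refcnt,
 * nb_segs and port, so a received mbuf can be reinitialized with a single
 * 64-bit store.
 */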
static inline int
bnxt_rxq_vec_setup_common(struct bnxt_rx_queue *rxq)
{
        uintptr_t p;
        struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

        mb_def.nb_segs = 1;
        mb_def.data_off = RTE_PKTMBUF_HEADROOM;
        mb_def.port = rxq->port_id;
        rte_mbuf_refcnt_set(&mb_def, 1);

        /* prevent compiler reordering: rearm_data covers previous fields */
        rte_compiler_barrier();
        p = (uintptr_t)&mb_def.rearm_data;
        rxq->mbuf_initializer = *(uint64_t *)p;
        rxq->rxrearm_nb = 0;
        rxq->rxrearm_start = 0;
        return 0;
}

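/*
 * Replenish the Rx ring: allocate fresh mbufs in multiples of four, program
 * their DMA addresses into the Rx descriptors starting at rxrearm_start, and
 * ring the doorbell so the hardware can start using the new buffers.
 */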
static inline void
bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
{
        struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
        struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
        int nb, i;

        /*
         * The number of mbufs to allocate must be a multiple of four. The
         * allocation must not go past the end of the ring.
         */
        nb = RTE_MIN(rxq->rxrearm_nb & ~0x3,
                     rxq->nb_rx_desc - rxq->rxrearm_start);

        /* Allocate new mbufs into the software ring. */
        if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;

                for (i = 0; i < nb; i++)
                        rx_bufs[i] = &rxq->fake_mbuf;
                return;
        }

        /* Initialize the mbufs in vector, processing four mbufs per loop iteration. */
        for (i = 0; i < nb; i += 4) {
                rxbds[0].address = rte_mbuf_data_iova_default(rx_bufs[0]);
                rxbds[1].address = rte_mbuf_data_iova_default(rx_bufs[1]);
                rxbds[2].address = rte_mbuf_data_iova_default(rx_bufs[2]);
                rxbds[3].address = rte_mbuf_data_iova_default(rx_bufs[3]);

                rxbds += 4;
                rx_bufs += 4;
        }

        rxq->rxrearm_start += nb;
        /* Advance the raw producer so the doorbell posts the new buffers. */
        rxr->rx_raw_prod += nb;
        /*
         * We could pass rxq->rxrearm_start - 1 here instead, but that would
         * complicate the epoch bit calculation.
         */
        bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
        if (rxq->rxrearm_start >= rxq->nb_rx_desc)
                rxq->rxrearm_start = 0;

        rxq->rxrearm_nb -= nb;
}

/*
 * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 * is enabled.
 */
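/*
 * Fast free assumes a single mempool and a reference count of one for every
 * transmitted mbuf, so completed buffers are returned with at most two
 * rte_mempool_put_bulk() calls: one batch up to the end of the ring and,
 * if the range wraps, one more from the start of the ring.
 */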
static inline void
bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, uint32_t nr_pkts)
{
        struct bnxt_tx_ring_info *txr = txq->tx_ring;
        uint16_t cons, raw_cons = txr->tx_raw_cons;
        uint32_t ring_mask, ring_size, num;
        struct rte_mempool *pool;

        ring_mask = txr->tx_ring_struct->ring_mask;
        ring_size = txr->tx_ring_struct->ring_size;

        cons = raw_cons & ring_mask;
        num = RTE_MIN(nr_pkts, ring_size - cons);
        pool = txr->tx_buf_ring[cons]->pool;

        rte_mempool_put_bulk(pool, (void **)&txr->tx_buf_ring[cons], num);
        memset(&txr->tx_buf_ring[cons], 0, num * sizeof(struct rte_mbuf *));
        raw_cons += num;
        num = nr_pkts - num;
        if (num) {
                cons = raw_cons & ring_mask;
                rte_mempool_put_bulk(pool, (void **)&txr->tx_buf_ring[cons],
                                     num);
                memset(&txr->tx_buf_ring[cons], 0,
                       num * sizeof(struct rte_mbuf *));
                raw_cons += num;
        }

        txr->tx_raw_cons = raw_cons;
}

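/*
 * General transmit completion function. Each mbuf is released with
 * rte_pktmbuf_prefree_seg(); runs of consecutive mbufs from the same mempool
 * are batched into a single rte_mempool_put_bulk() call, and mbufs that still
 * hold additional references are skipped rather than freed.
 */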
static inline void
bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, uint32_t nr_pkts)
{
        struct bnxt_tx_ring_info *txr = txq->tx_ring;
        uint16_t cons, raw_cons = txr->tx_raw_cons;
        uint32_t ring_mask, ring_size, num, blk;
        struct rte_mempool *pool;

        ring_mask = txr->tx_ring_struct->ring_mask;
        ring_size = txr->tx_ring_struct->ring_size;

        while (nr_pkts) {
                struct rte_mbuf *mbuf;

                cons = raw_cons & ring_mask;
                num = RTE_MIN(nr_pkts, ring_size - cons);
                pool = txr->tx_buf_ring[cons]->pool;

                blk = 0;
                do {
                        mbuf = txr->tx_buf_ring[cons + blk];
                        mbuf = rte_pktmbuf_prefree_seg(mbuf);
                        if (!mbuf || mbuf->pool != pool)
                                break;
                        blk++;
                } while (blk < num);

                if (blk) {
                        rte_mempool_put_bulk(pool,
                                             (void **)&txr->tx_buf_ring[cons],
                                             blk);
                        memset(&txr->tx_buf_ring[cons], 0,
                               blk * sizeof(struct rte_mbuf *));
                        raw_cons += blk;
                        nr_pkts -= blk;
                }
                if (!mbuf) {
                        /* Skip freeing mbufs with non-zero reference count. */
                        raw_cons++;
                        nr_pkts--;
                }
        }
        txr->tx_raw_cons = raw_cons;
}
#endif /* _BNXT_RXTX_VEC_COMMON_H_ */