/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _IXGBE_RXTX_VEC_COMMON_H_
#define _IXGBE_RXTX_VEC_COMMON_H_

#include <stdint.h>
#include <ethdev_driver.h>

#include "../common/rx.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"

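/*
 * Bulk-free transmitted mbufs from the S/W ring once the device has
 * written back the DD (descriptor done) bit on the threshold descriptor.
 * Mbufs that share a mempool are returned with a single bulk put; a
 * change of mempool flushes the pending batch and starts a new one.
 *
 * Returns tx_rs_thresh (the number of descriptors reclaimed) on success,
 * or 0 if the threshold descriptor is not yet done.
 *
 * A minimal sketch of the expected caller pattern in a vector TX burst
 * routine (illustrative, not the definitive call site):
 *
 *	if (txq->nb_tx_free < txq->tx_free_thresh)
 *		ixgbe_tx_free_bufs(txq);
 */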
static __rte_always_inline int
ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
{
	struct ci_tx_entry_vec *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &txq->sw_ring_vec[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void **)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}

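/*
 * Stash the mbuf pointers of a transmitted burst into the S/W ring
 * entries so the completion path (ixgbe_tx_free_bufs()) can free them
 * once the hardware is done with them.
 */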
static __rte_always_inline void
tx_backlog_entry(struct ci_tx_entry_vec *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

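/*
 * Free all mbufs still held in the S/W ring, e.g. on queue stop or
 * release. Skipped when the ring is unallocated or already fully free
 * (nb_tx_free == nb_tx_desc - 1); afterwards every entry is cleared.
 */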
static inline void
_ixgbe_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
{
	unsigned int i;
	struct ci_tx_entry_vec *txe;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
		return;

	/* release the used mbufs in sw_ring */
	for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
	     i != txq->tx_tail;
	     i = (i + 1) % txq->nb_tx_desc) {
		txe = &txq->sw_ring_vec[i];
		rte_pktmbuf_free_seg(txe->mbuf);
	}
	txq->nb_tx_free = max_desc;

	/* reset tx_entry */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txe = &txq->sw_ring_vec[i];
		txe->mbuf = NULL;
	}
}

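/*
 * Free the mbufs still attached to the RX ring. With no descriptors
 * pending rearm every slot may hold an mbuf, so the whole ring is
 * walked; otherwise only the range [rx_tail, rxrearm_start) is live.
 */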
static inline void
_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) % rxq->nb_rx_desc) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

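/*
 * Release the S/W ring allocation. Setup advanced sw_ring_vec by one
 * entry (see ixgbe_txq_vec_setup_default()), so the pointer originally
 * returned by the allocator sits one entry before it.
 */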
static inline void
_ixgbe_tx_free_swring_vec(struct ci_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		rte_free(txq->sw_ring_vec - 1);
		txq->sw_ring_vec = NULL;
	}
}

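/*
 * Return a TX queue to its post-init state: zero the H/W ring, mark
 * every descriptor as done, clear the S/W ring entries and reset the
 * queue bookkeeping (tail, thresholds, context cache).
 */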
static inline void
_ixgbe_reset_tx_queue_vec(struct ci_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
	struct ci_tx_entry_vec *txe = txq->sw_ring_vec;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->ixgbe_tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries and mark every descriptor as done */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->ixgbe_tx_ring[i];

		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset(txq->ctx_cache, 0, IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}

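/*
 * Precompute the 64-bit mbuf "initializer": a template of the fields
 * covered by rearm_data (data_off, refcnt, nb_segs, port) that the
 * vector RX paths can write into each new mbuf with a single store.
 */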
static inline int
ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

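/*
 * Common TX setup for the vector paths: reserve the first S/W ring
 * entry for overflow and install the queue ops. The release path frees
 * sw_ring_vec - 1 to undo this offset.
 */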
static inline int
ixgbe_txq_vec_setup_default(struct ci_tx_queue *txq,
			    const struct ixgbe_txq_ops *txq_ops)
{
	if (txq->sw_ring_vec == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_vec = txq->sw_ring_vec + 1;
	txq->ops = txq_ops;

	return 0;
}

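/*
 * Check whether the current device configuration is compatible with the
 * vector RX path: flow director must be disabled, and builds with
 * IEEE 1588 timestamping enabled rule the vector path out entirely.
 */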
static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_fdir_conf *fconf = IXGBE_DEV_FDIR_CONF(dev);

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif /* _IXGBE_RXTX_VEC_COMMON_H_ */