/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _IXGBE_RXTX_VEC_COMMON_H_
#define _IXGBE_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>

#include "../common/rx.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"

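/*
 * Free transmitted mbufs back to their mempools in bulk once the descriptor
 * at tx_next_dd reports Descriptor Done. Returns the number of SW ring
 * entries released (tx_rs_thresh), or 0 if the threshold descriptor has not
 * completed yet.
 */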
static __rte_always_inline int
ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
{
	struct ci_tx_entry_vec *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &txq->sw_ring_vec[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void **)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}

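/* Record the transmitted mbufs in the vector SW ring for later cleanup. */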
static __rte_always_inline void
tx_backlog_entry(struct ci_tx_entry_vec *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

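/*
 * Free every mbuf still owned by the RX SW ring. When descriptors are
 * pending rearm (rxrearm_nb != 0), only the window from rx_tail up to
 * rxrearm_start holds valid mbufs; otherwise the whole ring is scanned.
 */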
static inline void
_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) % rxq->nb_rx_desc) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

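/*
 * Release the vector SW ring. The ring pointer was advanced by one entry at
 * setup time (see ixgbe_txq_vec_setup_default()), so step back before
 * freeing the original allocation.
 */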
static inline void
_ixgbe_tx_free_swring_vec(struct ci_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		rte_free(txq->sw_ring_vec - 1);
		txq->sw_ring_vec = NULL;
	}
}

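/*
 * Reset the HW descriptor ring and the vector SW ring to their
 * post-initialization state: all descriptors marked done, no mbufs
 * outstanding, and the free/clean counters rewound.
 */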
static inline void
_ixgbe_reset_tx_queue_vec(struct ci_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
	struct ci_tx_entry_vec *txe = txq->sw_ring_vec;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->ixgbe_tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries and mark every descriptor as done */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->ixgbe_tx_ring[i];

		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset(txq->ctx_cache, 0, IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}

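/*
 * Build the 64-bit mbuf_initializer template from a zeroed mbuf's rearm_data
 * area; the vector RX paths use it to reinitialize mbufs when rearming
 * descriptors.
 */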
static inline int
ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

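/*
 * Switch the queue to the vector transmit layout: skip the first SW ring
 * entry, which is kept as an overflow slot, and flag the queue as using the
 * vector TX path and vector SW ring.
 */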
static inline int
ixgbe_txq_vec_setup_default(struct ci_tx_queue *txq,
			    const struct ixgbe_txq_ops *txq_ops)
{
	if (txq->sw_ring_vec == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_vec = txq->sw_ring_vec + 1;
	txq->ops = txq_ops;
	txq->vector_tx = 1;
	txq->vector_sw_ring = 1;

	return 0;
}

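/*
 * Check whether the device configuration allows the vector RX path: flow
 * director must be disabled, and IEEE 1588 timestamping is not supported at
 * all. Returns 0 if the vector path can be used, -1 otherwise.
 */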
static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_fdir_conf *fconf = IXGBE_DEV_FDIR_CONF(dev);

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}
#endif /* _IXGBE_RXTX_VEC_COMMON_H_ */