/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "../common/rx.h"
#include "ice_rxtx.h"

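/*
 * Reassemble scattered packets into complete mbuf chains.
 *
 * The vector Rx paths hand back one mbuf per descriptor; when a packet
 * spans several descriptors the extra segments are marked in split_flags.
 * This helper chains those segments onto the packet carried over in
 * rxq->pkt_first_seg/pkt_last_seg, strips the CRC from the tail of each
 * completed packet, and compacts the finished packets to the front of
 * rx_bufs. Returns the number of complete packets in rx_bufs; any
 * unfinished packet is saved in the queue for the next burst.
 */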
static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
			  uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * sizeof(*pkts));
	return pkt_idx;
}

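/*
 * Usage sketch (hypothetical caller, condensed from the vector scattered
 * Rx wrappers): when no packet is carried over from the previous burst,
 * buffers that completed on their own are skipped and reassembly starts
 * at the first buffer actually marked as split:
 *
 *	unsigned int i = 0;
 *	if (!rxq->pkt_first_seg) {
 *		while (i < nb_bufs && !split_flags[i])
 *			i++;
 *		if (i == nb_bufs)
 *			return nb_bufs;
 *		rxq->pkt_first_seg = rx_pkts[i];
 *	}
 *	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i],
 *			nb_bufs - i, &split_flags[i]);
 */

/*
 * Check whether the Tx descriptor at @idx has been completed: hardware
 * writes ICE_TX_DESC_DTYPE_DESC_DONE into the DTYPE field of descriptor
 * quad-word 1 once the descriptor has been processed.
 */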
static inline int
ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
}

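/*
 * Free all mbufs still owned by a vector Rx queue's software ring.
 *
 * Entries in the rearm window [rxrearm_start, rx_tail) hold no mbufs, so
 * when part of the ring has been consumed only [rx_tail, rxrearm_start)
 * needs to be walked; a fully armed ring is freed in a single pass.
 */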
static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (unlikely(!rxq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
		return;
	}

	if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

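/*
 * Build the 64-bit template written into each freshly allocated mbuf by
 * the vector rearm routines. The rearm_data marker in struct rte_mbuf
 * overlays data_off, refcnt, nb_segs and port, so all four fields can be
 * reset with a single 8-byte store.
 */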
static inline int
ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

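/*
 * Usage sketch (hypothetical, mirroring the SSE rearm code): the template
 * is typically written into each new mbuf with one vector store, e.g.
 *
 *	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
 *	_mm_store_si128((__m128i *)&mb0->rearm_data, mbuf_init);
 */

/* Tx offloads that force a fallback to the scalar Tx path. */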
#define ICE_TX_NO_VECTOR_FLAGS (			\
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
		RTE_ETH_TX_OFFLOAD_TCP_TSO |		\
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)

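/* Tx offloads handled by the offload-capable vector Tx path. */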
#define ICE_TX_VECTOR_OFFLOAD (				\
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |	\
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

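/* Rx offloads that require the offload-capable vector Rx path. */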
#define ICE_RX_VECTOR_OFFLOAD (				\
		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_RX_OFFLOAD_VLAN |		\
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

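/* Return values of the queue/device checks below: plain vs offload path. */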
#define ICE_VECTOR_PATH		0
#define ICE_VECTOR_OFFLOAD_PATH	1

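/*
 * Check whether an Rx queue is usable by the vector path. Returns -1 if
 * not, ICE_VECTOR_OFFLOAD_PATH if the enabled offloads need the offload
 * variant, ICE_VECTOR_PATH otherwise. The ring size must be a power of
 * two (indexes are wrapped by masking) and rx_free_thresh must be at
 * least one burst and divide the ring size evenly.
 */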
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
	if (!rxq)
		return -1;

	if (!rte_is_power_of_2(rxq->nb_rx_desc))
		return -1;

	if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
		return -1;

	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
		return -1;

	if (rxq->proto_xtr != PROTO_XTR_NONE)
		return -1;

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		return -1;

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)
		return -1;

	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

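/*
 * Check whether a Tx queue is usable by the vector path: tx_rs_thresh
 * must lie in [ICE_VPMD_TX_BURST, ICE_TX_MAX_FREE_BUF_SZ] and no
 * unsupported offload may be enabled. Return convention matches the Rx
 * check above.
 */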
static inline int
ice_tx_vec_queue_default(struct ci_tx_queue *txq)
{
	if (!txq)
		return -1;

	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
		return -1;

	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
		return -1;

	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

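/*
 * Device-level Rx check: every queue must pass the per-queue check, and
 * if any queue needs the offload path the whole device uses it.
 */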
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ice_rx_queue *rxq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		ret = ice_rx_vec_queue_default(rxq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}

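/* Device-level Tx check, mirroring the Rx variant above. */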
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ci_tx_queue *txq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		ret = ice_tx_vec_queue_default(txq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}

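/*
 * Translate mbuf offload flags into the command and offset fields of Tx
 * descriptor quad-word 1. MACLEN is expressed in 2-byte units and
 * IPLEN/L4LEN in 4-byte units, hence the shifts; L2TAG1 carries the VLAN
 * tag to insert.
 */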
static inline void
ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
		       uint64_t *txd_hi)
{
	uint64_t ol_flags = tx_pkt->ol_flags;
	uint32_t td_cmd = 0;
	uint32_t td_offset = 0;

	/* Tx Checksum Offload */
	/* set MACLEN */
	td_offset |= (tx_pkt->l2_len >> 1) <<
		ICE_TX_DESC_LEN_MACLEN_S;

	/* Enable L3 checksum offload */
	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			ICE_TX_DESC_LEN_IPLEN_S;
	}

	/* Enable L4 checksum offloads */
	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
			ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
			ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
			ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	default:
		break;
	}

	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;

	/* Tx VLAN/QinQ insertion offload */
	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
				ICE_TXD_QW1_L2TAG1_S);
	}

	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
}
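
/*
 * Usage sketch (hypothetical, simplified from the vector Tx descriptor
 * build): the caller composes the rest of quad-word 1 and lets this
 * helper OR in the offload bits:
 *
 *	uint64_t hi_qw = ICE_TX_DESC_DTYPE_DATA |
 *		((uint64_t)ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S) |
 *		((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);
 *	ice_txd_enable_offload(pkt, &hi_qw);
 *	txdp->buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(pkt));
 *	txdp->cmd_type_offset_bsz = rte_cpu_to_le_64(hi_qw);
 */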
#endif /* _ICE_RXTX_VEC_COMMON_H_ */