/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "../common/rx.h"
#include "ice_rxtx.h"

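/*
 * Reassemble scattered packets: walk a burst of Rx buffers and, guided by
 * the per-buffer split flags, chain continuation segments onto the packet
 * carried over in rxq->pkt_first_seg/rxq->pkt_last_seg, strip the CRC from
 * the tail of each completed packet, and compact the completed packets to
 * the front of rx_bufs. Returns the number of completed packets.
 *
 * Illustrative call site (a sketch, not code from this file): a vector
 * scattered-Rx burst routine that found its first split buffer at index i
 * would typically finish the burst with something like
 *
 *	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i],
 *					     nb_bufs - i, &split_flags[i]);
 */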
static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
			  uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * sizeof(*pkts));
	return pkt_idx;
}

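/*
 * Return non-zero when the Tx descriptor at 'idx' has been written back by
 * hardware. The descriptor is done once its DTYPE field reads back as
 * ICE_TX_DESC_DTYPE_DESC_DONE; both the mask and the expected value are
 * converted to little-endian so the comparison works directly on the raw
 * descriptor word.
 */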
static inline int
ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
}

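/*
 * Release every mbuf still owned by a vector Rx queue. If nothing has been
 * rearmed yet (rxrearm_nb == 0), every descriptor slot may hold an mbuf;
 * otherwise only the window from rx_tail up to rxrearm_start does, since
 * slots waiting for rearm have already been handed back to the mempool.
 */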
static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (unlikely(!rxq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
		return;
	}

	if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

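/*
 * Tx offloads the vector paths cannot handle. A queue configured with any
 * of these falls back to the scalar Tx path: multi-segment frames and the
 * TSO/tunnel/outer-checksum offloads are not supported by the vector
 * routines.
 */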
#define ICE_TX_NO_VECTOR_FLAGS (			\
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
		RTE_ETH_TX_OFFLOAD_TCP_TSO |		\
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)

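/*
 * Tx offloads the vector paths do support, but only on the offload variant
 * of the path: VLAN/QinQ insertion and the inner L3/L4 checksums handled
 * by ice_txd_enable_offload() below.
 */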
#define ICE_TX_VECTOR_OFFLOAD (				\
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |	\
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

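/*
 * Rx offloads that select the offload variant of the vector path, which
 * additionally extracts checksum, VLAN and RSS results from the
 * descriptor flag words.
 */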
#define ICE_RX_VECTOR_OFFLOAD (				\
		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_RX_OFFLOAD_VLAN |		\
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

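/*
 * Return values of the queue/device checks below: the plain vector path,
 * or the vector path with offload handling enabled.
 */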
#define ICE_VECTOR_PATH		0
#define ICE_VECTOR_OFFLOAD_PATH	1

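/*
 * Check whether a single Rx queue can use a vector path: the descriptor
 * count must be a power of two (the rearm logic masks indexes with
 * nb_rx_desc - 1), the free threshold must cover at least one vector burst
 * and divide the ring size evenly, and no unsupported feature (protocol
 * extraction, timestamping, buffer split) may be enabled. Returns -1 when
 * the queue must use the scalar path, otherwise which vector path to use.
 */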
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
	if (!rxq)
		return -1;

	if (!rte_is_power_of_2(rxq->nb_rx_desc))
		return -1;

	if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
		return -1;

	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
		return -1;

	if (rxq->proto_xtr != PROTO_XTR_NONE)
		return -1;

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		return -1;

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)
		return -1;

	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

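/*
 * Tx-side counterpart of the check above: the RS threshold must fit the
 * vector burst and free-buffer limits, and none of the unsupported Tx
 * offloads may be enabled. Returns -1 for the scalar path, otherwise which
 * vector path to use.
 */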
static inline int
ice_tx_vec_queue_default(struct ci_tx_queue *txq)
{
	if (!txq)
		return -1;

	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
		return -1;

	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
		return -1;

	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

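/*
 * Device-level Rx check: every queue must pass the per-queue check, and if
 * any queue needs the offload path the whole device uses it. Returns -1 if
 * any queue disqualifies the vector path.
 */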
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ice_rx_queue *rxq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		ret = ice_rx_vec_queue_default(rxq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}

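/* Device-level Tx check; same logic as the Rx check above. */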
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ci_tx_queue *txq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		ret = ice_tx_vec_queue_default(txq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}

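/*
 * Fill the checksum/VLAN offload fields of the high quadword of a Tx data
 * descriptor from an mbuf's offload flags: MACLEN/IPLEN/L4LEN go into the
 * offset field (in units of 2- or 4-byte words, hence the shifts), the
 * IIPT/L4T command bits select the checksum types, and the VLAN TCI is
 * inserted via L2TAG1.
 *
 * Illustrative use (a sketch, not code from this file): a vector Tx path
 * would build the quadword before writing the descriptor, e.g.
 *
 *	uint64_t hi_qw = ICE_TX_DESC_DTYPE_DATA |
 *		((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);
 *	ice_txd_enable_offload(pkt, &hi_qw);
 */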
static inline void
ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
		       uint64_t *txd_hi)
{
	uint64_t ol_flags = tx_pkt->ol_flags;
	uint32_t td_cmd = 0;
	uint32_t td_offset = 0;

	/* Tx Checksum Offload */
	/* set MACLEN */
	td_offset |= (tx_pkt->l2_len >> 1) <<
		ICE_TX_DESC_LEN_MACLEN_S;

	/* Enable L3 checksum offload */
	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			ICE_TX_DESC_LEN_IPLEN_S;
	}

	/* Enable L4 checksum offloads */
	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
			ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
			ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
			ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	default:
		break;
	}

	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;

	/* Tx VLAN/QINQ insertion Offload */
	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
				ICE_TXD_QW1_L2TAG1_S);
	}

	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
}
#endif /* _ICE_RXTX_VEC_COMMON_H_ */