/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "../common/rx.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"

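/*
 * Reassemble the segments of packets that the vector Rx path received
 * scattered across multiple mbufs. Partially reassembled packets are
 * carried between calls in rxq->pkt_first_seg/pkt_last_seg. The CRC is
 * stripped once from the tail of each completed packet, and the finished
 * packets are compacted to the front of rx_bufs. Returns the number of
 * complete packets placed in rx_bufs.
 */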
static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last buffer of the packet */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

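/*
 * Return true when the Tx descriptor at @idx has been written back by
 * hardware, i.e. its DTYPE field reads I40E_TX_DESC_DTYPE_DESC_DONE.
 */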
static inline int
i40e_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->i40e_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
}

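/*
 * Free all mbufs still held by the Rx software ring and mark every
 * entry as needing rearm. If the ring is only partially rearmed, just
 * the window from rx_tail up to rxrearm_start holds valid mbufs.
 */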
static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

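/*
 * Default device-level check for the vector Rx path: reject QinQ
 * (VLAN extend) offload and require every configured Rx queue to be
 * vector-capable. Returns 0 if vector Rx may be used, -1 otherwise.
 * When RTE_LIBRTE_IEEE1588 is defined, vector Rx is never used.
 */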
static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* no QinQ support */
	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
		return -1;

	/**
	 * Vector mode is allowed only when each queue's configuration
	 * (descriptor count, free threshold and offloads) is vector-capable.
	 */
	ad->rx_vec_allowed = true;
	for (uint16_t i = 0; i < dev->data->nb_rx_queues; i++) {
		struct i40e_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;
		if (!ci_rxq_vec_capable(rxq->nb_rx_desc, rxq->rx_free_thresh, rxq->offloads)) {
			ad->rx_vec_allowed = false;
			break;
		}
	}

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif /* _I40E_RXTX_VEC_COMMON_H_ */