/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#include <arm_sve.h>
#include <rte_io.h>
#include <ethdev_driver.h>

#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_rxtx_vec.h"

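/*
 * Predicate helpers: each macro below builds an SVE predicate with a fixed
 * number of active lanes, named "<element width>_<total bits covered>". For
 * example, PG64_256BIT enables the first four 64-bit lanes (256 bits of
 * data), while PG64_ALLBIT enables every 64-bit lane of the hardware vector
 * length.
 */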
#define PG16_128BIT		svwhilelt_b16(0, 8)
#define PG16_256BIT		svwhilelt_b16(0, 16)
#define PG32_256BIT		svwhilelt_b32(0, 8)
#define PG64_64BIT		svwhilelt_b64(0, 1)
#define PG64_128BIT		svwhilelt_b64(0, 2)
#define PG64_256BIT		svwhilelt_b64(0, 4)
#define PG64_ALLBIT		svptrue_b64()

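/*
 * Each hns3 buffer descriptor (BD) is 32 bytes. The offsets below are byte
 * offsets within one BD: the DMA address sits at offset 0, and the 32-bit
 * word carrying the VLD (descriptor valid) flag sits at offset 28, which
 * corresponds to rx.bd_base_info in struct hns3_desc.
 */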
#define BD_SIZE			32
#define BD_FIELD_ADDR_OFFSET	0
#define BD_FIELD_VALID_OFFSET	28

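/*
 * Scalar completion of bd_vld_num received descriptors: fill in the
 * per-packet mbuf fields (RSS hash, lengths, ol_flags, packet type), update
 * the byte counter, and return a bitmap with one bit set for every
 * descriptor that hns3_handle_bdinfo() reported as erroneous.
 */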
static inline uint32_t
hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
			  struct rte_mbuf **rx_pkts,
			  struct hns3_desc *rxdp,
			  uint32_t bd_vld_num)
{
	uint32_t l234_info, ol_info, bd_base_info;
	uint32_t retcode = 0;
	int ret, i;

	for (i = 0; i < (int)bd_vld_num; i++) {
		/* fill the Rx fields that mbuf_initializer does not cover */
		rx_pkts[i]->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
		rx_pkts[i]->hash.rss = rxdp[i].rx.rss_hash;
		rx_pkts[i]->pkt_len = rte_le_to_cpu_16(rxdp[i].rx.pkt_len) -
					rxq->crc_len;
		rx_pkts[i]->data_len = rx_pkts[i]->pkt_len;

		l234_info = rxdp[i].rx.l234_info;
		ol_info = rxdp[i].rx.ol_info;
		bd_base_info = rxdp[i].rx.bd_base_info;
		ret = hns3_handle_bdinfo(rxq, rx_pkts[i], bd_base_info, l234_info);
		if (unlikely(ret)) {
			retcode |= 1u << i;
			continue;
		}

		rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);

		/* Increment bytes counter */
		rxq->basic_stats.bytes += rx_pkts[i]->pkt_len;
	}

	return retcode;
}

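/*
 * Prefetch the eight mbufs referenced by sw_ring[0..7]: the two vector loads
 * pick up the mbuf pointers and the gather prefetches pull the mbuf headers
 * themselves into L1 before hns3_desc_parse_field_sve() touches them.
 */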
static inline void
hns3_rx_prefetch_mbuf_sve(struct hns3_entry *sw_ring)
{
	svuint64_t prf1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[0]);
	svuint64_t prf2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[4]);
	svprfd_gather_u64base(PG64_256BIT, prf1st, SV_PLDL1KEEP);
	svprfd_gather_u64base(PG64_256BIT, prf2st, SV_PLDL1KEEP);
}

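/*
 * Receive up to nb_pkts packets in groups of HNS3_SVE_DEFAULT_DESCS_PER_LOOP
 * descriptors. For each group: count the leading descriptors whose VLD bit
 * is set, publish the corresponding mbuf pointers to rx_pkts, rearm each
 * mbuf from rxq->mbuf_initializer, and complete the packets via
 * hns3_desc_parse_field_sve(). Per-packet parse errors are reported through
 * *bd_err_mask (bit n maps to rx_pkts[n]); the return value is the number of
 * descriptors consumed.
 */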
static inline uint16_t
hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq,
			struct rte_mbuf **__restrict rx_pkts,
			uint16_t nb_pkts,
			uint64_t *bd_err_mask)
{
	uint16_t rx_id = rxq->next_to_use;
	struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id];
	struct hns3_desc *rxdp = &rxq->rx_ring[rx_id];
	struct hns3_desc *rxdp2, *next_rxdp;
	uint64_t bd_valid_num;
	uint32_t parse_retcode;
	uint16_t nb_rx = 0;
	int pos, offset;

	svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

	/* compile-time check of the rte_mbuf length field layout */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, pkt_len) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, data_len) + 2);

	for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP,
				     rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) {
		svuint64_t mbp1st, mbp2st, mbuf_init;
		svuint32_t vld;
		svbool_t vld_op;

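		/*
		 * The gather/predicate sequence below counts how many of the
		 * next HNS3_SVE_DEFAULT_DESCS_PER_LOOP descriptors are ready,
		 * stopping at the first one whose VLD bit is clear. A scalar
		 * sketch of the same logic (illustrative only, not part of
		 * the driver):
		 *
		 *	bd_valid_num = 0;
		 *	while (bd_valid_num < HNS3_SVE_DEFAULT_DESCS_PER_LOOP &&
		 *	       (rte_le_to_cpu_32(rxdp[bd_valid_num].rx.bd_base_info) &
		 *		BIT(HNS3_RXD_VLD_B)))
		 *		bd_valid_num++;
		 *
		 * svbrkb_b_z() clears every predicate lane at and after the
		 * first non-valid descriptor, and svcntp_b32() counts the
		 * remaining active lanes.
		 */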
		/* count the leading descriptors that have the VLD bit set */
		vld = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp,
			svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
		vld = svand_n_u32_z(pg32, vld, BIT(HNS3_RXD_VLD_B));
		vld_op = svcmpne_n_u32(pg32, vld, BIT(HNS3_RXD_VLD_B));
		bd_valid_num = svcntp_b32(pg32, svbrkb_b_z(pg32, vld_op));
		if (bd_valid_num == 0)
			break;

		/* load the first 4 mbuf pointers */
		mbp1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos]);
		/* load the next 4 mbuf pointers */
		mbp2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos + 4]);

		/*
		 * reading offset_table[] creates a data dependency that keeps
		 * the descriptor loads below ordered after the valid check
		 */
		offset = rxq->offset_table[bd_valid_num];
		rxdp2 = rxdp + offset;

		/* store the first 4 mbuf pointers into rx_pkts */
		svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos], mbp1st);
		/* store the next 4 mbuf pointers into rx_pkts */
		svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos + 4], mbp2st);

		/* broadcast mbuf_initializer to all lanes */
		mbuf_init = svdup_n_u64(rxq->mbuf_initializer);
		/* scatter mbuf_initializer into each mbuf's rearm_data */
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
			offsetof(struct rte_mbuf, rearm_data), mbuf_init);
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
			offsetof(struct rte_mbuf, rearm_data), mbuf_init);

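		/*
		 * Note: rxq->mbuf_initializer is the 64-bit rearm_data image
		 * (data_off, refcnt, nb_segs, port) prepared at queue set-up,
		 * so the two scatter stores above rearm all eight mbufs
		 * without touching them one by one.
		 */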
		next_rxdp = rxdp + HNS3_SVE_DEFAULT_DESCS_PER_LOOP;
		rte_prefetch_non_temporal(next_rxdp);
		rte_prefetch_non_temporal(next_rxdp + 2);
		rte_prefetch_non_temporal(next_rxdp + 4);
		rte_prefetch_non_temporal(next_rxdp + 6);

		parse_retcode = hns3_desc_parse_field_sve(rxq, &rx_pkts[pos],
					&rxdp2[offset], bd_valid_num);
		if (unlikely(parse_retcode))
			(*bd_err_mask) |= ((uint64_t)parse_retcode) << pos;

		hns3_rx_prefetch_mbuf_sve(&sw_ring[pos +
					HNS3_SVE_DEFAULT_DESCS_PER_LOOP]);

		nb_rx += bd_valid_num;
		if (unlikely(bd_valid_num < HNS3_SVE_DEFAULT_DESCS_PER_LOOP))
			break;
	}

	rxq->rx_rearm_nb += nb_rx;
	rxq->next_to_use += nb_rx;
	if (rxq->next_to_use >= rxq->nb_rx_desc)
		rxq->next_to_use = 0;

	return nb_rx;
}

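/*
 * SVE receive entry point. The requested burst is rounded down to a multiple
 * of HNS3_SVE_DEFAULT_DESCS_PER_LOOP and handed to hns3_recv_burst_vec_sve()
 * in chunks of at most HNS3_DEFAULT_RX_BURST descriptors; packets flagged in
 * pkt_err_mask are filtered out by hns3_rx_reassemble_pkts(). The ring is
 * rearmed whenever more than HNS3_DEFAULT_RXQ_REARM_THRESH descriptors are
 * pending.
 */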
uint16_t
hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
		       struct rte_mbuf **__restrict rx_pkts,
		       uint16_t nb_pkts)
{
	struct hns3_rx_queue *rxq = rx_queue;
	struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
	uint64_t pkt_err_mask;  /* bit mask of packets in error */
	uint16_t nb_rx;

	rte_prefetch_non_temporal(rxdp);

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

	if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
		hns3_rxq_rearm_mbuf(rxq);

	if (unlikely(!(rxdp->rx.bd_base_info &
			rte_cpu_to_le_32(1u << HNS3_RXD_VLD_B))))
		return 0;

	hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);

	if (likely(nb_pkts <= HNS3_DEFAULT_RX_BURST)) {
		pkt_err_mask = 0;
		nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts,
						&pkt_err_mask);
		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, pkt_err_mask);
		return nb_rx;
	}

	nb_rx = 0;
	while (nb_pkts > 0) {
		uint16_t ret, n;

		n = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
		pkt_err_mask = 0;
		ret = hns3_recv_burst_vec_sve(rxq, &rx_pkts[nb_rx], n,
					      &pkt_err_mask);
		nb_pkts -= ret;
		nb_rx += hns3_rx_reassemble_pkts(&rx_pkts[nb_rx], ret,
						 pkt_err_mask);
		if (ret < n)
			break;

		if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
			hns3_rxq_rearm_mbuf(rxq);
	}

	return nb_rx;
}

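/*
 * Fill nb_pkts transmit BDs from an array of single-segment mbufs. The mbuf
 * pointers are consumed svcntd() at a time: the buffer address, data_off and
 * data_len fields are gathered straight out of the mbufs and scattered into
 * the four 8-byte words of each BD, with the VLD and FE flags set in the
 * last word. The caller (hns3_xmit_fixed_burst_vec_sve()) has already made
 * sure the descriptors starting at txq->next_to_use are free.
 */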
static inline void
hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
			 struct rte_mbuf **pkts,
			 uint16_t nb_pkts)
{
#define DATA_OFF_LEN_VAL_MASK	0xFFFF
	struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
	struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
	const uint64_t valid_bit = (BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B)) <<
				   HNS3_UINT32_BIT;
	svuint64_t base_addr, buf_iova, data_off, data_len, addr;
	svuint64_t offsets = svindex_u64(0, BD_SIZE);
	uint32_t cnt = svcntd();
	svbool_t pg;
	uint32_t i;

	for (i = 0; i < nb_pkts; /* i is updated at the end of the loop */) {
		pg = svwhilelt_b64_u32(i, nb_pkts);
		base_addr = svld1_u64(pg, (uint64_t *)pkts);
		/* calc the address of each mbuf's buffer-address field */
#if RTE_IOVA_IN_MBUF
		buf_iova = svadd_n_u64_z(pg, base_addr,
					 offsetof(struct rte_mbuf, buf_iova));
#else
		buf_iova = svadd_n_u64_z(pg, base_addr,
					 offsetof(struct rte_mbuf, buf_addr));
#endif
		/* calc the address of each mbuf's data_off field */
		data_off = svadd_n_u64_z(pg, base_addr,
					 offsetof(struct rte_mbuf, data_off));
		/* calc the address of each mbuf's data_len field */
		data_len = svadd_n_u64_z(pg, base_addr,
					 offsetof(struct rte_mbuf, data_len));
		/* store the mbuf pointers to tx_entry */
		svst1_u64(pg, (uint64_t *)tx_entry, base_addr);
		/* gather pkts->buf_iova */
		buf_iova = svld1_gather_u64base_u64(pg, buf_iova);
		/* gather 64 bits starting at pkts->data_off */
		data_off = svld1_gather_u64base_u64(pg, data_off);
		/* gather 64 bits starting at pkts->data_len */
		data_len = svld1_gather_u64base_u64(pg, data_len);
		/* keep only the low 16 bits of data_off */
		data_off = svand_n_u64_z(pg, data_off, DATA_OFF_LEN_VAL_MASK);
		/* keep only the low 16 bits of data_len */
		data_len = svand_n_u64_z(pg, data_len, DATA_OFF_LEN_VAL_MASK);
		/* calc the DMA address of each mbuf's data region */
		addr = svadd_u64_z(pg, buf_iova, data_off);
		/*
		 * shift left 16 bits: data_len occupies bytes 2~3 of the
		 * BD's second 8-byte word
		 */
		data_len = svlsl_n_u64_z(pg, data_len, HNS3_UINT16_BIT);
		/* save bytes 0~7 of every BD */
		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->addr,
					    offsets, addr);
		/* save bytes 8~15 of every BD */
		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.vlan_tag,
					    offsets, data_len);
		/* save bytes 16~23 of every BD */
		svst1_scatter_u64offset_u64(pg,
				(uint64_t *)&txdp->tx.outer_vlan_tag,
				offsets, svdup_n_u64(0));
		/* save bytes 24~31 of every BD */
		svst1_scatter_u64offset_u64(pg,
				(uint64_t *)&txdp->tx.paylen_fd_dop_ol4cs,
				offsets, svdup_n_u64(valid_bit));

		/* Increment bytes counter */
		txq->basic_stats.bytes +=
			(svaddv_u64(pg, data_len) >> HNS3_UINT16_BIT);

		/* update the indexes for the next iteration */
		i += cnt;
		pkts += cnt;
		txdp += cnt;
		tx_entry += cnt;
	}
}

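/*
 * Transmit at most nb_pkts single-segment packets in one shot: reclaim
 * completed BDs if needed, clamp the burst to the number of free BDs, fill
 * the hardware ring (splitting the fill in two when it wraps past the end of
 * the ring) and then notify the hardware via the queue tail register.
 */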
static uint16_t
hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
			      struct rte_mbuf **__restrict tx_pkts,
			      uint16_t nb_pkts)
{
	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
	uint16_t nb_tx = 0;

	if (txq->tx_bd_ready < txq->tx_free_thresh)
		hns3_tx_free_buffers(txq);

	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
	if (unlikely(nb_pkts == 0)) {
		txq->dfx_stats.queue_full_cnt++;
		return 0;
	}

	if (txq->next_to_use + nb_pkts >= txq->nb_tx_desc) {
		nb_tx = txq->nb_tx_desc - txq->next_to_use;
		hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx);
		txq->next_to_use = 0;
	}

	if (nb_pkts > nb_tx) {
		hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
		txq->next_to_use += nb_pkts - nb_tx;
	}

	txq->tx_bd_ready -= nb_pkts;
	hns3_write_txq_tail_reg(txq, nb_pkts);

	return nb_pkts;
}

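/*
 * SVE transmit entry point: submit the burst in chunks of at most
 * txq->tx_rs_thresh packets and stop early as soon as a chunk cannot be
 * queued in full (i.e. the ring ran out of free descriptors).
 */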
uint16_t
hns3_xmit_pkts_vec_sve(void *tx_queue,
		       struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
	uint16_t ret, new_burst;
	uint16_t nb_tx = 0;

	while (nb_pkts) {
		new_burst = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = hns3_xmit_fixed_burst_vec_sve(tx_queue, &tx_pkts[nb_tx],
						    new_burst);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < new_burst)
			break;
	}

	return nb_tx;
}