/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Hisilicon Limited.
 */

#include <arm_sve.h>
#include <rte_io.h>
#include <rte_ethdev_driver.h>

#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_rxtx_vec.h"

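/*
 * Fixed-width predicates: svwhilelt_bN(0, k) enables the first k N-bit
 * lanes, so these macros pin each operation to 64/128/256 bits of data
 * even though the SVE hardware vector length may be wider.
 */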
#define PG16_128BIT		svwhilelt_b16(0, 8)
#define PG16_256BIT		svwhilelt_b16(0, 16)
#define PG32_256BIT		svwhilelt_b32(0, 8)
#define PG64_64BIT		svwhilelt_b64(0, 1)
#define PG64_128BIT		svwhilelt_b64(0, 2)
#define PG64_256BIT		svwhilelt_b64(0, 4)
#define PG64_ALLBIT		svptrue_b64()

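/*
 * Each hns3 descriptor (BD) is 32 bytes; the offsets below locate the
 * fields accessed by the gather loads and scatter stores, where
 * svindex_u32(offset, BD_SIZE) walks the same field across consecutive
 * BDs.
 */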
#define BD_SIZE			32
#define BD_FIELD_ADDR_OFFSET	0
#define BD_FIELD_L234_OFFSET	8
#define BD_FIELD_XLEN_OFFSET	12
#define BD_FIELD_RSS_OFFSET	16
#define BD_FIELD_OL_OFFSET	24
#define BD_FIELD_VALID_OFFSET	28

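/*
 * Per-loop stash of the BD fields that still need scalar parsing after
 * the vector pass (see hns3_desc_parse_field_sve()).
 */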
typedef struct {
	uint32_t l234_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
	uint32_t ol_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
	uint32_t bd_base_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
} HNS3_SVE_KEY_FIELD_S;

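/*
 * Scalar tail of the Rx path: walk the descriptors declared valid by the
 * vector pass, fill the per-mbuf metadata, and return a bitmask of the
 * BDs whose error handling failed so the caller can drop them.
 */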
static inline uint32_t
hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
			  struct rte_mbuf **rx_pkts,
			  HNS3_SVE_KEY_FIELD_S *key,
			  uint32_t bd_vld_num)
{
	uint32_t retcode = 0;
	uint32_t cksum_err;
	int ret, i;

	for (i = 0; i < (int)bd_vld_num; i++) {
		/* init the last 64 bits of the rte_mbuf rearm data */
		rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;

		ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
					 key->l234_info[i], &cksum_err);
		if (unlikely(ret)) {
			retcode |= 1u << i;
			continue;
		}

		rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq,
					key->l234_info[i], key->ol_info[i]);
		if (likely(key->bd_base_info[i] & BIT(HNS3_RXD_L3L4P_B)))
			hns3_rx_set_cksum_flag(rx_pkts[i],
					rx_pkts[i]->packet_type, cksum_err);
	}

	return retcode;
}

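/*
 * Prefetch 8 sw_ring entries' mbufs: load the mbuf pointers into vector
 * registers, then issue a gather prefetch through them.
 */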
static inline void
hns3_rx_prefetch_mbuf_sve(struct hns3_entry *sw_ring)
{
	svuint64_t prf1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[0]);
	svuint64_t prf2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[4]);
	svprfd_gather_u64base(PG64_256BIT, prf1st, SV_PLDL1KEEP);
	svprfd_gather_u64base(PG64_256BIT, prf2st, SV_PLDL1KEEP);
}

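/*
 * Vector Rx burst: processes HNS3_SVE_DEFAULT_DESCS_PER_LOOP descriptors
 * per iteration. BD fields are gathered into vector registers, per-mbuf
 * metadata is scattered straight into the mbufs, and only the
 * error/ptype handling falls back to the scalar parse above.
 */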
static inline uint16_t
hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq,
			struct rte_mbuf **__restrict rx_pkts,
			uint16_t nb_pkts,
			uint64_t *bd_err_mask)
{
#define XLEN_ADJUST_LEN		32
#define RSS_ADJUST_LEN		16
#define GEN_VLD_U8_ZIP_INDEX	svindex_s8(28, -4)
	uint16_t rx_id = rxq->next_to_use;
	struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id];
	struct hns3_desc *rxdp = &rxq->rx_ring[rx_id];
	struct hns3_desc *rxdp2;
	HNS3_SVE_KEY_FIELD_S key_field;
	uint64_t bd_valid_num;
	uint32_t parse_retcode;
	uint16_t nb_rx = 0;
	int pos, offset;

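	/*
	 * svtbl() index tables: each in-range index selects the source
	 * lane to copy, while the out-of-range 0xffff entries zero the
	 * destination lane, clearing the field halves the BD does not
	 * supply.
	 */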
	uint16_t xlen_adjust[XLEN_ADJUST_LEN] = {
		0,  0xffff, 1,  0xffff,    /* 1st mbuf: pkt_len and data_len */
		2,  0xffff, 3,  0xffff,    /* 2nd mbuf: pkt_len and data_len */
		4,  0xffff, 5,  0xffff,    /* 3rd mbuf: pkt_len and data_len */
		6,  0xffff, 7,  0xffff,    /* 4th mbuf: pkt_len and data_len */
		8,  0xffff, 9,  0xffff,    /* 5th mbuf: pkt_len and data_len */
		10, 0xffff, 11, 0xffff,    /* 6th mbuf: pkt_len and data_len */
		12, 0xffff, 13, 0xffff,    /* 7th mbuf: pkt_len and data_len */
		14, 0xffff, 15, 0xffff,    /* 8th mbuf: pkt_len and data_len */
	};

	uint32_t rss_adjust[RSS_ADJUST_LEN] = {
		0, 0xffff,        /* 1st mbuf: rss */
		1, 0xffff,        /* 2nd mbuf: rss */
		2, 0xffff,        /* 3rd mbuf: rss */
		3, 0xffff,        /* 4th mbuf: rss */
		4, 0xffff,        /* 5th mbuf: rss */
		5, 0xffff,        /* 6th mbuf: rss */
		6, 0xffff,        /* 7th mbuf: rss */
		7, 0xffff,        /* 8th mbuf: rss */
	};

	svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
	svuint16_t xlen_tbl1 = svld1_u16(PG16_256BIT, xlen_adjust);
	svuint16_t xlen_tbl2 = svld1_u16(PG16_256BIT, &xlen_adjust[16]);
	svuint32_t rss_tbl1 = svld1_u32(PG32_256BIT, rss_adjust);
	svuint32_t rss_tbl2 = svld1_u32(PG32_256BIT, &rss_adjust[8]);

	for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP,
				     rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) {
		svuint64_t vld_clz, mbp1st, mbp2st, mbuf_init;
		svuint64_t xlen1st, xlen2st, rss1st, rss2st;
		svuint32_t l234, ol, vld, vld2, xlen, rss;
		svuint8_t  vld_u8;

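		/*
		 * Count the leading valid BDs: move each VLD bit into the
		 * sign position and arithmetic-shift it back so each valid
		 * lane becomes all-ones, compress one byte per lane in
		 * reversed order (lane 0 lands in the most significant
		 * byte), then invert and count leading zero bits; dividing
		 * by 8 yields the number of consecutive valid descriptors
		 * from the head.
		 */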
		/* calc how many BDs are valid: part 1 */
		vld = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp,
			svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
		vld2 = svlsl_n_u32_z(pg32, vld,
				    HNS3_UINT32_BIT - 1 - HNS3_RXD_VLD_B);
		vld2 = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
			svreinterpret_s32_u32(vld2), HNS3_UINT32_BIT - 1));

		/* load 4 mbuf pointers */
		mbp1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos]);

		/* calc how many BDs are valid: part 2 */
		vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld2),
				  svreinterpret_u8_s8(GEN_VLD_U8_ZIP_INDEX));
		vld_clz = svnot_u64_z(PG64_64BIT, svreinterpret_u64_u8(vld_u8));
		vld_clz = svclz_u64_z(PG64_64BIT, vld_clz);
		svst1_u64(PG64_64BIT, &bd_valid_num, vld_clz);
		bd_valid_num /= HNS3_UINT8_BIT;

		/* load 4 more mbuf pointers */
		mbp2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos + 4]);

		/* use offset as a data dependency to order the loads below */
		offset = rxq->offset_table[bd_valid_num];
		rxdp2 = rxdp + offset;

		/* store 4 mbuf pointers into rx_pkts */
		svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos], mbp1st);

		/* load key fields to vector regs */
		l234 = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
				svindex_u32(BD_FIELD_L234_OFFSET, BD_SIZE));
		ol = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
				svindex_u32(BD_FIELD_OL_OFFSET, BD_SIZE));

		/* store 4 more mbuf pointers into rx_pkts */
		svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos + 4], mbp2st);

		/* load data_len, pkt_len and rss_hash */
		xlen = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
				svindex_u32(BD_FIELD_XLEN_OFFSET, BD_SIZE));
		rss = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
				svindex_u32(BD_FIELD_RSS_OFFSET, BD_SIZE));

		/* store key fields to the stash buffer */
		svst1_u32(pg32, (uint32_t *)key_field.l234_info, l234);
		svst1_u32(pg32, (uint32_t *)key_field.bd_base_info, vld);
		svst1_u32(pg32, (uint32_t *)key_field.ol_info, ol);

		/* subtract crc_len from pkt_len and data_len */
		xlen = svreinterpret_u32_u16(svsub_n_u16_z(PG16_256BIT,
			svreinterpret_u16_u32(xlen), rxq->crc_len));

		/* init mbuf_initializer */
		mbuf_init = svdup_n_u64(rxq->mbuf_initializer);

		/* extract data_len, pkt_len and rss via the tbl tables */
		xlen1st = svreinterpret_u64_u16(
			svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl1));
		xlen2st = svreinterpret_u64_u16(
			svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl2));
		rss1st = svreinterpret_u64_u32(svtbl_u32(rss, rss_tbl1));
		rss2st = svreinterpret_u64_u32(svtbl_u32(rss, rss_tbl2));

		/* scatter mbuf_initializer into each mbuf's rearm_data */
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
			offsetof(struct rte_mbuf, rearm_data), mbuf_init);
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
			offsetof(struct rte_mbuf, rearm_data), mbuf_init);

		/* save data_len, pkt_len and rss */
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
			offsetof(struct rte_mbuf, pkt_len), xlen1st);
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
			offsetof(struct rte_mbuf, hash.rss), rss1st);
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
			offsetof(struct rte_mbuf, pkt_len), xlen2st);
		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
			offsetof(struct rte_mbuf, hash.rss), rss2st);

		rte_prefetch_non_temporal(rxdp +
					  HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

		parse_retcode = hns3_desc_parse_field_sve(rxq, &rx_pkts[pos],
					&key_field, bd_valid_num);
		if (unlikely(parse_retcode))
			(*bd_err_mask) |= ((uint64_t)parse_retcode) << pos;

		hns3_rx_prefetch_mbuf_sve(&sw_ring[pos +
					HNS3_SVE_DEFAULT_DESCS_PER_LOOP]);

		nb_rx += bd_valid_num;
		if (unlikely(bd_valid_num < HNS3_SVE_DEFAULT_DESCS_PER_LOOP))
			break;
	}

	rxq->rx_rearm_nb += nb_rx;
	rxq->next_to_use += nb_rx;
	if (rxq->next_to_use >= rxq->nb_rx_desc)
		rxq->next_to_use = 0;

	return nb_rx;
}

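/*
 * Refill HNS3_DEFAULT_RXQ_REARM_THRESH descriptors: bulk-allocate mbufs,
 * prefetch them, scatter buf_iova + headroom into the BD address words,
 * clear the BD word holding the VLD bit, then bump the ring head register.
 */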
static inline void
hns3_rxq_rearm_mbuf_sve(struct hns3_rx_queue *rxq)
{
#define REARM_LOOP_STEP_NUM	4
	struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start];
	struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start;
	struct hns3_entry *rxep_tmp = rxep;
	int i;

	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
					  HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) {
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
		return;
	}

	for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
		rxep_tmp += REARM_LOOP_STEP_NUM) {
		svuint64_t prf = svld1_u64(PG64_256BIT, (uint64_t *)rxep_tmp);
		svprfd_gather_u64base(PG64_256BIT, prf, SV_PLDL1STRM);
	}

	for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
		rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) {
		uint64_t iova[REARM_LOOP_STEP_NUM];
		iova[0] = rxep[0].mbuf->buf_iova;
		iova[1] = rxep[1].mbuf->buf_iova;
		iova[2] = rxep[2].mbuf->buf_iova;
		iova[3] = rxep[3].mbuf->buf_iova;
		svuint64_t siova = svld1_u64(PG64_256BIT, iova);
		siova = svadd_n_u64_z(PG64_256BIT, siova, RTE_PKTMBUF_HEADROOM);
		svuint64_t ol_base = svdup_n_u64(0);
		svst1_scatter_u64offset_u64(PG64_256BIT,
			(uint64_t *)&rxdp[0].addr,
			svindex_u64(BD_FIELD_ADDR_OFFSET, BD_SIZE), siova);
		svst1_scatter_u64offset_u64(PG64_256BIT,
			(uint64_t *)&rxdp[0].addr,
			svindex_u64(BD_FIELD_OL_OFFSET, BD_SIZE), ol_base);
	}

	rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH;
	if (rxq->rx_rearm_start >= rxq->nb_rx_desc)
		rxq->rx_rearm_start = 0;

	rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH;

	hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH);
}

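/*
 * Rx burst entry point: the burst is clipped to HNS3_DEFAULT_RX_BURST and
 * rounded down to a multiple of the per-loop descriptor count; erroneous
 * packets flagged in bd_err_mask are compacted out afterwards.
 */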
uint16_t
hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
		       struct rte_mbuf **__restrict rx_pkts,
		       uint16_t nb_pkts)
{
	struct hns3_rx_queue *rxq = rx_queue;
	struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
	uint64_t bd_err_mask;  /* bit mask indicating which pkts are in error */
	uint16_t nb_rx;

	rte_prefetch_non_temporal(rxdp);

	nb_pkts = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

	if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
		hns3_rxq_rearm_mbuf_sve(rxq);

	if (unlikely(!(rxdp->rx.bd_base_info &
			rte_cpu_to_le_32(1u << HNS3_RXD_VLD_B))))
		return 0;

	hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);

	bd_err_mask = 0;
	nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts, &bd_err_mask);
	if (unlikely(bd_err_mask))
		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, bd_err_mask);

	return nb_rx;
}

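/*
 * Vector scan of the next tx_rs_thresh descriptors: OR all VLD fields
 * together and reduce them to a single 64-bit value, so one scalar
 * compare tells whether any descriptor is still owned by hardware.
 */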
static inline void
hns3_tx_free_buffers_sve(struct hns3_tx_queue *txq)
{
#define HNS3_SVE_CHECK_DESCS_PER_LOOP	8
#define TX_VLD_U8_ZIP_INDEX		svindex_u8(0, 4)
	svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_CHECK_DESCS_PER_LOOP);
	svuint32_t vld, vld2;
	svuint8_t vld_u8;
	uint64_t vld_all;
	struct hns3_desc *tx_desc;
	int i;

	/*
	 * All mbufs can be released only when the VLD bits of all
	 * descriptors in a batch are cleared.
	 */
	/* OR together the valid fields of all descriptors */
	vld = svdup_n_u32(0);
	tx_desc = &txq->tx_ring[txq->next_to_clean];
	for (i = 0; i < txq->tx_rs_thresh; i += HNS3_SVE_CHECK_DESCS_PER_LOOP,
				tx_desc += HNS3_SVE_CHECK_DESCS_PER_LOOP) {
		vld2 = svld1_gather_u32offset_u32(pg32, (uint32_t *)tx_desc,
				svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
		vld = svorr_u32_z(pg32, vld, vld2);
	}
	/* shift left, then arithmetic shift right, to isolate the valid bit */
	vld = svlsl_n_u32_z(pg32, vld,
			    HNS3_UINT32_BIT - 1 - HNS3_TXD_VLD_B);
	vld = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
		svreinterpret_s32_u32(vld), HNS3_UINT32_BIT - 1));
	/* use tbl to compress the 32-bit lanes into 8-bit lanes */
	vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld), TX_VLD_U8_ZIP_INDEX);
	/* dump the compressed 64 bits into a scalar variable */
	svst1_u64(PG64_64BIT, &vld_all, svreinterpret_u64_u8(vld_u8));
	if (vld_all > 0)
		return;

	hns3_tx_bulk_free_buffers(txq);
}

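/*
 * Vector-length-agnostic Tx fill: svwhilelt/svcntd size each iteration to
 * the hardware vector length. Mbuf fields are gathered through the mbuf
 * pointers themselves (base address + field offset), and each 32-byte BD
 * is written as four 8-byte scatter stores.
 */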
static inline void
hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
			 struct rte_mbuf **pkts,
			 uint16_t nb_pkts)
{
#define DATA_OFF_LEN_VAL_MASK	0xFFFF
	struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
	struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
	const uint64_t valid_bit = (BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B)) <<
				   HNS3_UINT32_BIT;
	svuint64_t base_addr, buf_iova, data_off, data_len, addr;
	svuint64_t offsets = svindex_u64(0, BD_SIZE);
	uint32_t i = 0;
	svbool_t pg = svwhilelt_b64_u32(i, nb_pkts);

	do {
		base_addr = svld1_u64(pg, (uint64_t *)pkts);
		/* calc the address of each mbuf's buf_iova field */
		buf_iova = svadd_n_u64_z(pg, base_addr,
					 offsetof(struct rte_mbuf, buf_iova));
		/* calc the address of each mbuf's data_off field */
		data_off = svadd_n_u64_z(pg, base_addr,
					 offsetof(struct rte_mbuf, data_off));
		/* calc the address of each mbuf's data_len field */
		data_len = svadd_n_u64_z(pg, base_addr,
					 offsetof(struct rte_mbuf, data_len));
		/* store the mbuf pointers to tx_entry */
		svst1_u64(pg, (uint64_t *)tx_entry, base_addr);
		/* read pkts->buf_iova */
		buf_iova = svld1_gather_u64base_u64(pg, buf_iova);
		/* read 64 bits starting at pkts->data_off */
		data_off = svld1_gather_u64base_u64(pg, data_off);
		/* read 64 bits starting at pkts->data_len */
		data_len = svld1_gather_u64base_u64(pg, data_len);
		/* clear the high 48 bits of data_off */
		data_off = svand_n_u64_z(pg, data_off, DATA_OFF_LEN_VAL_MASK);
		/* clear the high 48 bits of data_len */
		data_len = svand_n_u64_z(pg, data_len, DATA_OFF_LEN_VAL_MASK);
		/* calc the iova of each mbuf's data region */
		addr = svadd_u64_z(pg, buf_iova, data_off);
		/* shift: data_len sits 2 bytes into the BD's second 8-byte word */
		data_len = svlsl_n_u64_z(pg, data_len, HNS3_UINT16_BIT);
		/* save bytes 0-7 of every BD */
		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->addr,
					    offsets, addr);
		/* save bytes 8-15 of every BD */
		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.vlan_tag,
					    offsets, data_len);
		/* save bytes 16-23 of every BD */
		svst1_scatter_u64offset_u64(pg,
				(uint64_t *)&txdp->tx.outer_vlan_tag,
				offsets, svdup_n_u64(0));
		/* save bytes 24-31 of every BD */
		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.paylen,
					    offsets, svdup_n_u64(valid_bit));

		/* update indexes for the next loop */
		i += svcntd();
		pkts += svcntd();
		txdp += svcntd();
		tx_entry += svcntd();
		pg = svwhilelt_b64_u32(i, nb_pkts);
	} while (svptest_any(svptrue_b64(), pg));
}

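/*
 * Transmit one fixed-size burst. If the burst would run past the end of
 * the ring, it is split into two fills so the descriptor writes wrap
 * cleanly.
 */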
static uint16_t
hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
			      struct rte_mbuf **__restrict tx_pkts,
			      uint16_t nb_pkts)
{
	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
	uint16_t nb_tx = 0;

	if (txq->tx_bd_ready < txq->tx_free_thresh)
		hns3_tx_free_buffers_sve(txq);

	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
	if (unlikely(nb_pkts == 0)) {
		txq->queue_full_cnt++;
		return 0;
	}

	if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
		nb_tx = txq->nb_tx_desc - txq->next_to_use;
		hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx);
		txq->next_to_use = 0;
	}

	hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
	txq->next_to_use += nb_pkts - nb_tx;

	txq->tx_bd_ready -= nb_pkts;
	hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);

	return nb_pkts;
}

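/*
 * Tx burst entry point: slice the burst into tx_rs_thresh-sized chunks so
 * the free/fill granularity matches the RS threshold, and stop early if a
 * chunk could not be sent in full.
 */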
uint16_t
hns3_xmit_pkts_vec_sve(void *tx_queue,
		       struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
	uint16_t ret, new_burst;
	uint16_t nb_tx = 0;

	while (nb_pkts) {
		new_burst = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = hns3_xmit_fixed_burst_vec_sve(tx_queue, &tx_pkts[nb_tx],
						    new_burst);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < new_burst)
			break;
	}

	return nb_tx;
}