1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2023 Intel Corporation
3  */
4 
5 #include <ethdev_driver.h>
6 #include <rte_net.h>
7 #include <rte_vect.h>
8 
9 #include "cpfl_ethdev.h"
10 #include "cpfl_rxtx.h"
11 #include "cpfl_rxtx_vec_common.h"
12 
13 static inline void
14 cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
15 {
16 	uint32_t i, size;
17 
18 	if (!txq) {
19 		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
20 		return;
21 	}
22 
23 	size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
24 	for (i = 0; i < size; i++)
25 		((volatile char *)txq->desc_ring)[i] = 0;
26 }
27 
28 static inline void
29 cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
30 {
31 	uint32_t i, size;
32 
33 	if (!cq) {
34 		PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
35 		return;
36 	}
37 
38 	size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
39 	for (i = 0; i < size; i++)
40 		((volatile char *)cq->compl_ring)[i] = 0;
41 }
42 
43 static inline void
44 cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
45 {
46 	uint16_t len;
47 	uint32_t i;
48 
49 	if (!rxq)
50 		return;
51 
52 	len = rxq->nb_rx_desc;
53 	for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
54 		((volatile char *)rxq->rx_ring)[i] = 0;
55 }
56 
57 static inline void
58 cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
59 {
60 	uint16_t len;
61 	uint32_t i;
62 
63 	if (!rxbq)
64 		return;
65 
66 	len = rxbq->nb_rx_desc;
67 	for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
68 		((volatile char *)rxbq->rx_ring)[i] = 0;
69 
70 	rxbq->bufq1 = NULL;
71 	rxbq->bufq2 = NULL;
72 }
73 
74 static uint64_t
75 cpfl_rx_offload_convert(uint64_t offload)
76 {
77 	uint64_t ol = 0;
78 
79 	if ((offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0)
80 		ol |= IDPF_RX_OFFLOAD_IPV4_CKSUM;
81 	if ((offload & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) != 0)
82 		ol |= IDPF_RX_OFFLOAD_UDP_CKSUM;
83 	if ((offload & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) != 0)
84 		ol |= IDPF_RX_OFFLOAD_TCP_CKSUM;
85 	if ((offload & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
86 		ol |= IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM;
87 	if ((offload & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
88 		ol |= IDPF_RX_OFFLOAD_TIMESTAMP;
89 
90 	return ol;
91 }
92 
93 static uint64_t
94 cpfl_tx_offload_convert(uint64_t offload)
95 {
96 	uint64_t ol = 0;
97 
98 	if ((offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0)
99 		ol |= IDPF_TX_OFFLOAD_IPV4_CKSUM;
100 	if ((offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0)
101 		ol |= IDPF_TX_OFFLOAD_UDP_CKSUM;
102 	if ((offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
103 		ol |= IDPF_TX_OFFLOAD_TCP_CKSUM;
104 	if ((offload & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) != 0)
105 		ol |= IDPF_TX_OFFLOAD_SCTP_CKSUM;
106 	if ((offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
107 		ol |= IDPF_TX_OFFLOAD_MULTI_SEGS;
108 	if ((offload & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0)
109 		ol |= IDPF_TX_OFFLOAD_MBUF_FAST_FREE;
110 
111 	return ol;
112 }
113 
114 static const struct idpf_rxq_ops def_rxq_ops = {
115 	.release_mbufs = idpf_qc_rxq_mbufs_release,
116 };
117 
118 static const struct idpf_txq_ops def_txq_ops = {
119 	.release_mbufs = idpf_qc_txq_mbufs_release,
120 };
121 
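/* Reserve a DMA memzone for a queue descriptor ring. The ring size is derived
 * from the queue type and the single/split queue model, and the descriptors
 * are zeroed before the zone is returned.
 */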
122 static const struct rte_memzone *
123 cpfl_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,
124 		      uint16_t len, uint16_t queue_type,
125 		      unsigned int socket_id, bool splitq)
126 {
127 	char ring_name[RTE_MEMZONE_NAMESIZE];
128 	const struct rte_memzone *mz;
129 	uint32_t ring_size;
130 
131 	memset(ring_name, 0, RTE_MEMZONE_NAMESIZE);
132 	switch (queue_type) {
133 	case VIRTCHNL2_QUEUE_TYPE_TX:
134 		if (splitq)
135 			ring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc),
136 					      CPFL_DMA_MEM_ALIGN);
137 		else
138 			ring_size = RTE_ALIGN(len * sizeof(struct idpf_base_tx_desc),
139 					      CPFL_DMA_MEM_ALIGN);
140 		memcpy(ring_name, "cpfl Tx ring", sizeof("cpfl Tx ring"));
141 		break;
142 	case VIRTCHNL2_QUEUE_TYPE_RX:
143 		if (splitq)
144 			ring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),
145 					      CPFL_DMA_MEM_ALIGN);
146 		else
147 			ring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_singleq_rx_buf_desc),
148 					      CPFL_DMA_MEM_ALIGN);
149 		memcpy(ring_name, "cpfl Rx ring", sizeof("cpfl Rx ring"));
150 		break;
151 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
152 		ring_size = RTE_ALIGN(len * sizeof(struct idpf_splitq_tx_compl_desc),
153 				      CPFL_DMA_MEM_ALIGN);
154 		memcpy(ring_name, "cpfl Tx compl ring", sizeof("cpfl Tx compl ring"));
155 		break;
156 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
157 		ring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_splitq_rx_buf_desc),
158 				      CPFL_DMA_MEM_ALIGN);
159 		memcpy(ring_name, "cpfl Rx buf ring", sizeof("cpfl Rx buf ring"));
160 		break;
161 	default:
162 		PMD_INIT_LOG(ERR, "Invalid queue type");
163 		return NULL;
164 	}
165 
166 	mz = rte_eth_dma_zone_reserve(dev, ring_name, queue_idx,
167 				      ring_size, CPFL_RING_BASE_ALIGN,
168 				      socket_id);
169 	if (mz == NULL) {
170 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for ring");
171 		return NULL;
172 	}
173 
174 	/* Zero all the descriptors in the ring. */
175 	memset(mz->addr, 0, ring_size);
176 
177 	return mz;
178 }
179 
180 static void
181 cpfl_dma_zone_release(const struct rte_memzone *mz)
182 {
183 	rte_memzone_free(mz);
184 }
185 
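/* Set up one Rx buffer queue (bufq1 or bufq2, selected by bufq_id) backing a
 * split-queue Rx queue: allocate the queue structure, descriptor ring and SW
 * ring, reset the queue and attach it to the parent rxq.
 */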
186 static int
187 cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
188 			 uint16_t queue_idx, uint16_t rx_free_thresh,
189 			 uint16_t nb_desc, unsigned int socket_id,
190 			 struct rte_mempool *mp, uint8_t bufq_id)
191 {
192 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
193 	struct idpf_vport *vport = &cpfl_vport->base;
194 	struct idpf_adapter *base = vport->adapter;
195 	struct idpf_hw *hw = &base->hw;
196 	const struct rte_memzone *mz;
197 	struct idpf_rx_queue *bufq;
198 	uint16_t len;
199 	int ret;
200 
201 	bufq = rte_zmalloc_socket("cpfl bufq",
202 				   sizeof(struct idpf_rx_queue),
203 				   RTE_CACHE_LINE_SIZE,
204 				   socket_id);
205 	if (bufq == NULL) {
206 		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue.");
207 		ret = -ENOMEM;
208 		goto err_bufq1_alloc;
209 	}
210 
211 	bufq->mp = mp;
212 	bufq->nb_rx_desc = nb_desc;
213 	bufq->rx_free_thresh = rx_free_thresh;
214 	bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
215 	bufq->port_id = dev->data->port_id;
216 	bufq->rx_hdr_len = 0;
217 	bufq->adapter = base;
218 
219 	len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
220 	bufq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IDPF_RLAN_CTX_DBUF_S));
221 	bufq->rx_buf_len = RTE_MIN(bufq->rx_buf_len, IDPF_RX_MAX_DATA_BUF_SIZE);
222 
223 	/* Allocate a little more to support bulk allocation. */
224 	len = nb_desc + IDPF_RX_MAX_BURST;
225 
226 	mz = cpfl_dma_zone_reserve(dev, queue_idx, len,
227 				   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
228 				   socket_id, true);
229 	if (mz == NULL) {
230 		ret = -ENOMEM;
231 		goto err_mz_reserve;
232 	}
233 
234 	bufq->rx_ring_phys_addr = mz->iova;
235 	bufq->rx_ring = mz->addr;
236 	bufq->mz = mz;
237 
238 	bufq->sw_ring =
239 		rte_zmalloc_socket("cpfl rx bufq sw ring",
240 				   sizeof(struct rte_mbuf *) * len,
241 				   RTE_CACHE_LINE_SIZE,
242 				   socket_id);
243 	if (bufq->sw_ring == NULL) {
244 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
245 		ret = -ENOMEM;
246 		goto err_sw_ring_alloc;
247 	}
248 
249 	idpf_qc_split_rx_bufq_reset(bufq);
250 	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
251 			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
252 	bufq->ops = &def_rxq_ops;
253 	bufq->q_set = true;
254 
255 	if (bufq_id == IDPF_RX_SPLIT_BUFQ1_ID) {
256 		rxq->bufq1 = bufq;
257 	} else if (bufq_id == IDPF_RX_SPLIT_BUFQ2_ID) {
258 		rxq->bufq2 = bufq;
259 	} else {
260 		PMD_INIT_LOG(ERR, "Invalid buffer queue index.");
261 		ret = -EINVAL;
262 		goto err_bufq_id;
263 	}
264 
265 	return 0;
266 
267 err_bufq_id:
268 	rte_free(bufq->sw_ring);
269 err_sw_ring_alloc:
270 	cpfl_dma_zone_release(mz);
271 err_mz_reserve:
272 	rte_free(bufq);
273 err_bufq1_alloc:
274 	return ret;
275 }
276 
277 static void
278 cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
279 {
280 	rte_free(bufq->sw_ring);
281 	cpfl_dma_zone_release(bufq->mz);
282 	rte_free(bufq);
283 }
284 
285 static void
286 cpfl_rx_queue_release(void *rxq)
287 {
288 	struct cpfl_rx_queue *cpfl_rxq = rxq;
289 	struct idpf_rx_queue *q = NULL;
290 
291 	if (cpfl_rxq == NULL)
292 		return;
293 
294 	q = &cpfl_rxq->base;
295 
296 	/* Split queue */
297 	if (!q->adapter->is_rx_singleq) {
298 		/* The mz is shared between Tx/Rx hairpin queues, so let the Rx
299 		 * release path free the buffers, q->bufq1->mz and q->mz.
300 		 */
301 		if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
302 			cpfl_rx_split_bufq_release(q->bufq2);
303 
304 		if (q->bufq1)
305 			cpfl_rx_split_bufq_release(q->bufq1);
306 
307 		rte_memzone_free(q->mz);
308 		rte_free(cpfl_rxq);
309 		return;
310 	}
311 
312 	/* Single queue */
313 	q->ops->release_mbufs(q);
314 	rte_free(q->sw_ring);
315 	rte_memzone_free(q->mz);
316 	rte_free(cpfl_rxq);
317 }
318 
319 static void
320 cpfl_tx_queue_release(void *txq)
321 {
322 	struct cpfl_tx_queue *cpfl_txq = txq;
323 	struct idpf_tx_queue *q = NULL;
324 
325 	if (cpfl_txq == NULL)
326 		return;
327 
328 	q = &cpfl_txq->base;
329 
330 	if (q->complq) {
331 		rte_memzone_free(q->complq->mz);
332 		rte_free(q->complq);
333 	}
334 
335 	q->ops->release_mbufs(q);
336 	rte_free(q->sw_ring);
337 	rte_memzone_free(q->mz);
338 	rte_free(cpfl_txq);
339 }
340 
341 int
342 cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
343 		    uint16_t nb_desc, unsigned int socket_id,
344 		    const struct rte_eth_rxconf *rx_conf,
345 		    struct rte_mempool *mp)
346 {
347 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
348 	struct idpf_vport *vport = &cpfl_vport->base;
349 	struct idpf_adapter *base = vport->adapter;
350 	struct idpf_hw *hw = &base->hw;
351 	struct cpfl_rx_queue *cpfl_rxq;
352 	const struct rte_memzone *mz;
353 	struct idpf_rx_queue *rxq;
354 	uint16_t rx_free_thresh;
355 	uint64_t offloads;
356 	bool is_splitq;
357 	uint16_t len;
358 	int ret;
359 
360 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
361 
362 	/* Check free threshold */
363 	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
364 		CPFL_DEFAULT_RX_FREE_THRESH :
365 		rx_conf->rx_free_thresh;
366 	if (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)
367 		return -EINVAL;
368 
369 	/* Free memory if needed */
370 	if (dev->data->rx_queues[queue_idx] != NULL) {
371 		cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
372 		dev->data->rx_queues[queue_idx] = NULL;
373 	}
374 
375 	/* Setup Rx queue */
376 	cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
377 				 sizeof(struct cpfl_rx_queue),
378 				 RTE_CACHE_LINE_SIZE,
379 				 socket_id);
380 	if (cpfl_rxq == NULL) {
381 		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
382 		ret = -ENOMEM;
383 		goto err_rxq_alloc;
384 	}
385 
386 	rxq = &cpfl_rxq->base;
387 
388 	is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
389 
390 	rxq->mp = mp;
391 	rxq->nb_rx_desc = nb_desc;
392 	rxq->rx_free_thresh = rx_free_thresh;
393 	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
394 	rxq->port_id = dev->data->port_id;
395 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
396 	rxq->rx_hdr_len = 0;
397 	rxq->adapter = base;
398 	rxq->offloads = cpfl_rx_offload_convert(offloads);
399 
400 	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
401 	rxq->rx_buf_len = len;
402 
403 	/* Allocate a little more to support bulk allocation. */
404 	len = nb_desc + IDPF_RX_MAX_BURST;
405 	mz = cpfl_dma_zone_reserve(dev, queue_idx, len, VIRTCHNL2_QUEUE_TYPE_RX,
406 				   socket_id, is_splitq);
407 	if (mz == NULL) {
408 		ret = -ENOMEM;
409 		goto err_mz_reserve;
410 	}
411 	rxq->rx_ring_phys_addr = mz->iova;
412 	rxq->rx_ring = mz->addr;
413 	rxq->mz = mz;
414 
415 	if (!is_splitq) {
416 		rxq->sw_ring = rte_zmalloc_socket("cpfl rxq sw ring",
417 						  sizeof(struct rte_mbuf *) * len,
418 						  RTE_CACHE_LINE_SIZE,
419 						  socket_id);
420 		if (rxq->sw_ring == NULL) {
421 			PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
422 			ret = -ENOMEM;
423 			goto err_sw_ring_alloc;
424 		}
425 
426 		idpf_qc_single_rx_queue_reset(rxq);
427 		rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
428 				queue_idx * vport->chunks_info.rx_qtail_spacing);
429 		rxq->ops = &def_rxq_ops;
430 	} else {
431 		idpf_qc_split_rx_descq_reset(rxq);
432 
433 		/* Setup Rx buffer queues */
434 		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,
435 					       rx_free_thresh, nb_desc,
436 					       socket_id, mp, 1);
437 		if (ret != 0) {
438 			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
439 			ret = -EINVAL;
440 			goto err_bufq1_setup;
441 		}
442 
443 		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx + 1,
444 					       rx_free_thresh, nb_desc,
445 					       socket_id, mp, 2);
446 		if (ret != 0) {
447 			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
448 			ret = -EINVAL;
449 			goto err_bufq2_setup;
450 		}
451 	}
452 
453 	cpfl_vport->nb_data_rxq++;
454 	rxq->q_set = true;
455 	dev->data->rx_queues[queue_idx] = cpfl_rxq;
456 
457 	return 0;
458 
459 err_bufq2_setup:
460 	cpfl_rx_split_bufq_release(rxq->bufq1);
461 err_bufq1_setup:
462 err_sw_ring_alloc:
463 	cpfl_dma_zone_release(mz);
464 err_mz_reserve:
465 	rte_free(rxq);
466 err_rxq_alloc:
467 	return ret;
468 }
469 
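/* Allocate and initialize the Tx completion queue used by a split-queue Tx
 * queue, including its DMA descriptor ring.
 */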
470 static int
471 cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
472 		     uint16_t queue_idx, uint16_t nb_desc,
473 		     unsigned int socket_id)
474 {
475 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
476 	struct idpf_vport *vport = &cpfl_vport->base;
477 	const struct rte_memzone *mz;
478 	struct idpf_tx_queue *cq;
479 	int ret;
480 
481 	cq = rte_zmalloc_socket("cpfl splitq cq",
482 				sizeof(struct idpf_tx_queue),
483 				RTE_CACHE_LINE_SIZE,
484 				socket_id);
485 	if (cq == NULL) {
486 		PMD_INIT_LOG(ERR, "Failed to allocate memory for Tx compl queue");
487 		ret = -ENOMEM;
488 		goto err_cq_alloc;
489 	}
490 
491 	cq->nb_tx_desc = nb_desc;
492 	cq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;
493 	cq->port_id = dev->data->port_id;
494 	cq->txqs = dev->data->tx_queues;
495 	cq->tx_start_qid = vport->chunks_info.tx_start_qid;
496 
497 	mz = cpfl_dma_zone_reserve(dev, queue_idx, nb_desc,
498 				   VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION,
499 				   socket_id, true);
500 	if (mz == NULL) {
501 		ret = -ENOMEM;
502 		goto err_mz_reserve;
503 	}
504 	cq->tx_ring_phys_addr = mz->iova;
505 	cq->compl_ring = mz->addr;
506 	cq->mz = mz;
507 	idpf_qc_split_tx_complq_reset(cq);
508 
509 	txq->complq = cq;
510 
511 	return 0;
512 
513 err_mz_reserve:
514 	rte_free(cq);
515 err_cq_alloc:
516 	return ret;
517 }
518 
519 int
520 cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
521 		    uint16_t nb_desc, unsigned int socket_id,
522 		    const struct rte_eth_txconf *tx_conf)
523 {
524 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
525 	struct idpf_vport *vport = &cpfl_vport->base;
526 	struct idpf_adapter *base = vport->adapter;
527 	uint16_t tx_rs_thresh, tx_free_thresh;
528 	struct cpfl_tx_queue *cpfl_txq;
529 	struct idpf_hw *hw = &base->hw;
530 	const struct rte_memzone *mz;
531 	struct idpf_tx_queue *txq;
532 	uint64_t offloads;
533 	uint16_t len;
534 	bool is_splitq;
535 	int ret;
536 
537 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
538 
539 	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh > 0) ?
540 		tx_conf->tx_rs_thresh : CPFL_DEFAULT_TX_RS_THRESH);
541 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh > 0) ?
542 		tx_conf->tx_free_thresh : CPFL_DEFAULT_TX_FREE_THRESH);
543 	if (idpf_qc_tx_thresh_check(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
544 		return -EINVAL;
545 
546 	/* Free memory if needed. */
547 	if (dev->data->tx_queues[queue_idx] != NULL) {
548 		cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
549 		dev->data->tx_queues[queue_idx] = NULL;
550 	}
551 
552 	/* Allocate the TX queue data structure. */
553 	cpfl_txq = rte_zmalloc_socket("cpfl txq",
554 				 sizeof(struct cpfl_tx_queue),
555 				 RTE_CACHE_LINE_SIZE,
556 				 socket_id);
557 	if (cpfl_txq == NULL) {
558 		PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
559 		ret = -ENOMEM;
560 		goto err_txq_alloc;
561 	}
562 
563 	txq = &cpfl_txq->base;
564 
565 	is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
566 
567 	txq->nb_tx_desc = nb_desc;
568 	txq->rs_thresh = tx_rs_thresh;
569 	txq->free_thresh = tx_free_thresh;
570 	txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
571 	txq->port_id = dev->data->port_id;
572 	txq->offloads = cpfl_tx_offload_convert(offloads);
573 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
574 
575 	if (is_splitq)
576 		len = 2 * nb_desc;
577 	else
578 		len = nb_desc;
579 	txq->sw_nb_desc = len;
580 
581 	/* Allocate TX hardware ring descriptors. */
582 	mz = cpfl_dma_zone_reserve(dev, queue_idx, nb_desc, VIRTCHNL2_QUEUE_TYPE_TX,
583 				   socket_id, is_splitq);
584 	if (mz == NULL) {
585 		ret = -ENOMEM;
586 		goto err_mz_reserve;
587 	}
588 	txq->tx_ring_phys_addr = mz->iova;
589 	txq->mz = mz;
590 
591 	txq->sw_ring = rte_zmalloc_socket("cpfl tx sw ring",
592 					  sizeof(struct idpf_tx_entry) * len,
593 					  RTE_CACHE_LINE_SIZE, socket_id);
594 	if (txq->sw_ring == NULL) {
595 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
596 		ret = -ENOMEM;
597 		goto err_sw_ring_alloc;
598 	}
599 
600 	if (!is_splitq) {
601 		txq->tx_ring = mz->addr;
602 		idpf_qc_single_tx_queue_reset(txq);
603 	} else {
604 		txq->desc_ring = mz->addr;
605 		idpf_qc_split_tx_descq_reset(txq);
606 
607 		/* Setup tx completion queue if split model */
608 		ret = cpfl_tx_complq_setup(dev, txq, queue_idx,
609 					   2 * nb_desc, socket_id);
610 		if (ret != 0)
611 			goto err_complq_setup;
612 	}
613 
614 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
615 			queue_idx * vport->chunks_info.tx_qtail_spacing);
616 	txq->ops = &def_txq_ops;
617 	cpfl_vport->nb_data_txq++;
618 	txq->q_set = true;
619 	dev->data->tx_queues[queue_idx] = cpfl_txq;
620 
621 	return 0;
622 
623 err_complq_setup:
624 err_sw_ring_alloc:
625 	cpfl_dma_zone_release(mz);
626 err_mz_reserve:
627 	rte_free(txq);
628 err_txq_alloc:
629 	return ret;
630 }
631 
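/* Initialize a hairpin (p2p) Rx buffer queue. The p2p mbuf pool is created on
 * first use and shared by all hairpin queues of the vport.
 */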
632 static int
633 cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
634 			   uint16_t logic_qid, uint16_t nb_desc)
635 {
636 	struct cpfl_vport *cpfl_vport =
637 	    (struct cpfl_vport *)dev->data->dev_private;
638 	struct idpf_vport *vport = &cpfl_vport->base;
639 	struct idpf_adapter *adapter = vport->adapter;
640 	struct rte_mempool *mp;
641 	char pool_name[RTE_MEMPOOL_NAMESIZE];
642 
643 	mp = cpfl_vport->p2p_mp;
644 	if (!mp) {
645 		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
646 			 dev->data->port_id);
647 		mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
648 					     CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
649 					     dev->device->numa_node);
650 		if (!mp) {
651 			PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
652 			return -ENOMEM;
653 		}
654 		cpfl_vport->p2p_mp = mp;
655 	}
656 
657 	bufq->mp = mp;
658 	bufq->nb_rx_desc = nb_desc;
659 	bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
660 					 logic_qid);
661 	bufq->port_id = dev->data->port_id;
662 	bufq->adapter = adapter;
663 	bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
664 
665 	bufq->q_set = true;
666 	bufq->ops = &def_rxq_ops;
667 
668 	return 0;
669 }
670 
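/* Set up a hairpin Rx queue. Hairpin queues require the split queue model;
 * the first hairpin Rx queue also allocates the p2p Rx buffer queue that is
 * shared by all hairpin Rx queues of the vport.
 */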
671 int
672 cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
673 			    uint16_t nb_desc,
674 			    const struct rte_eth_hairpin_conf *conf)
675 {
676 	struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
677 	struct idpf_vport *vport = &cpfl_vport->base;
678 	struct idpf_adapter *adapter_base = vport->adapter;
679 	uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
680 	struct cpfl_rxq_hairpin_info *hairpin_info;
681 	struct cpfl_rx_queue *cpfl_rxq;
682 	struct idpf_rx_queue *bufq1 = NULL;
683 	struct idpf_rx_queue *rxq;
684 	uint16_t peer_port, peer_q;
685 	uint16_t qid;
686 	int ret;
687 
688 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
689 		PMD_INIT_LOG(ERR, "Only the split queue model supports hairpin queues.");
690 		return -EINVAL;
691 	}
692 
693 	if (conf->peer_count != 1) {
694 		PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
695 		return -EINVAL;
696 	}
697 
698 	peer_port = conf->peers[0].port;
699 	peer_q = conf->peers[0].queue;
700 
701 	if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
702 	    nb_desc > CPFL_MAX_RING_DESC ||
703 	    nb_desc < CPFL_MIN_RING_DESC) {
704 		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
705 		return -EINVAL;
706 	}
707 
708 	/* Free memory if needed */
709 	if (dev->data->rx_queues[queue_idx]) {
710 		cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
711 		dev->data->rx_queues[queue_idx] = NULL;
712 	}
713 
714 	/* Set up the Rx descriptor queue */
715 	cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
716 				 sizeof(struct cpfl_rx_queue),
717 				 RTE_CACHE_LINE_SIZE,
718 				 SOCKET_ID_ANY);
719 	if (!cpfl_rxq) {
720 		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
721 		return -ENOMEM;
722 	}
723 
724 	rxq = &cpfl_rxq->base;
725 	hairpin_info = &cpfl_rxq->hairpin_info;
726 	rxq->nb_rx_desc = nb_desc * 2;
727 	rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
728 	rxq->port_id = dev->data->port_id;
729 	rxq->adapter = adapter_base;
730 	rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
731 	hairpin_info->hairpin_q = true;
732 	hairpin_info->peer_txp = peer_port;
733 	hairpin_info->peer_txq_id = peer_q;
734 
735 	if (conf->manual_bind != 0)
736 		cpfl_vport->p2p_manual_bind = true;
737 	else
738 		cpfl_vport->p2p_manual_bind = false;
739 
740 	if (cpfl_vport->p2p_rx_bufq == NULL) {
741 		bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
742 					   sizeof(struct idpf_rx_queue),
743 					   RTE_CACHE_LINE_SIZE,
744 					   SOCKET_ID_ANY);
745 		if (!bufq1) {
746 			PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
747 			ret = -ENOMEM;
748 			goto err_alloc_bufq1;
749 		}
750 		qid = 2 * logic_qid;
751 		ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
752 		if (ret) {
753 			PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
754 			ret = -EINVAL;
755 			goto err_setup_bufq1;
756 		}
757 		cpfl_vport->p2p_rx_bufq = bufq1;
758 	}
759 
760 	rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
761 	rxq->bufq2 = NULL;
762 
763 	cpfl_vport->nb_p2p_rxq++;
764 	rxq->q_set = true;
765 	dev->data->rx_queues[queue_idx] = cpfl_rxq;
766 
767 	return 0;
768 
769 err_setup_bufq1:
770 	rte_mempool_free(cpfl_vport->p2p_mp);
771 	rte_free(bufq1);
772 err_alloc_bufq1:
773 	rte_free(cpfl_rxq);
774 
775 	return ret;
776 }
777 
778 int
779 cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
780 			    uint16_t nb_desc,
781 			    const struct rte_eth_hairpin_conf *conf)
782 {
783 	struct cpfl_vport *cpfl_vport =
784 	    (struct cpfl_vport *)dev->data->dev_private;
785 
786 	struct idpf_vport *vport = &cpfl_vport->base;
787 	struct idpf_adapter *adapter_base = vport->adapter;
788 	uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
789 	struct cpfl_txq_hairpin_info *hairpin_info;
790 	struct idpf_hw *hw = &adapter_base->hw;
791 	struct cpfl_tx_queue *cpfl_txq;
792 	struct idpf_tx_queue *txq, *cq;
793 	const struct rte_memzone *mz;
794 	uint32_t ring_size;
795 	uint16_t peer_port, peer_q;
796 	int ret;
797 
798 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
799 		PMD_INIT_LOG(ERR, "Only the split queue model supports hairpin queues.");
800 		return -EINVAL;
801 	}
802 
803 	if (conf->peer_count != 1) {
804 		PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
805 		return -EINVAL;
806 	}
807 
808 	peer_port = conf->peers[0].port;
809 	peer_q = conf->peers[0].queue;
810 
811 	if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
812 	    nb_desc > CPFL_MAX_RING_DESC ||
813 	    nb_desc < CPFL_MIN_RING_DESC) {
814 		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
815 			     nb_desc);
816 		return -EINVAL;
817 	}
818 
819 	/* Free memory if needed. */
820 	if (dev->data->tx_queues[queue_idx]) {
821 		cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
822 		dev->data->tx_queues[queue_idx] = NULL;
823 	}
824 
825 	/* Allocate the TX queue data structure. */
826 	cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
827 				 sizeof(struct cpfl_tx_queue),
828 				 RTE_CACHE_LINE_SIZE,
829 				 SOCKET_ID_ANY);
830 	if (!cpfl_txq) {
831 		PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
832 		return -ENOMEM;
833 	}
834 
835 	txq = &cpfl_txq->base;
836 	hairpin_info = &cpfl_txq->hairpin_info;
837 	/* The Tx queue ring length should be twice the Tx completion queue size. */
838 	txq->nb_tx_desc = nb_desc * 2;
839 	txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
840 	txq->port_id = dev->data->port_id;
841 	hairpin_info->hairpin_q = true;
842 	hairpin_info->peer_rxp = peer_port;
843 	hairpin_info->peer_rxq_id = peer_q;
844 
845 	if (conf->manual_bind != 0)
846 		cpfl_vport->p2p_manual_bind = true;
847 	else
848 		cpfl_vport->p2p_manual_bind = false;
849 
850 	/* The Tx hairpin queue always allocates the Tx HW ring. */
851 	ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
852 			      CPFL_DMA_MEM_ALIGN);
853 	mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
854 				      ring_size + CPFL_P2P_RING_BUF,
855 				      CPFL_RING_BASE_ALIGN,
856 				      dev->device->numa_node);
857 	if (!mz) {
858 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
859 		ret = -ENOMEM;
860 		goto err_txq_mz_rsv;
861 	}
862 
863 	txq->tx_ring_phys_addr = mz->iova;
864 	txq->desc_ring = mz->addr;
865 	txq->mz = mz;
866 
867 	cpfl_tx_hairpin_descq_reset(txq);
868 	txq->qtx_tail = hw->hw_addr +
869 		cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
870 				  logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
871 	txq->ops = &def_txq_ops;
872 
873 	if (cpfl_vport->p2p_tx_complq == NULL) {
874 		cq = rte_zmalloc_socket("cpfl hairpin cq",
875 					sizeof(struct idpf_tx_queue),
876 					RTE_CACHE_LINE_SIZE,
877 					dev->device->numa_node);
878 		if (!cq) {
879 			PMD_INIT_LOG(ERR, "Failed to allocate memory for Tx completion queue structure");
880 			ret = -ENOMEM;
881 			goto err_cq_alloc;
882 		}
883 
884 		cq->nb_tx_desc = nb_desc;
885 		cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
886 					       0);
887 		cq->port_id = dev->data->port_id;
888 
889 		/* Tx completion queue always allocates the HW ring */
890 		ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
891 				      CPFL_DMA_MEM_ALIGN);
892 		mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
893 					      ring_size + CPFL_P2P_RING_BUF,
894 					      CPFL_RING_BASE_ALIGN,
895 					      dev->device->numa_node);
896 		if (!mz) {
897 			PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
898 			ret = -ENOMEM;
899 			goto err_cq_mz_rsv;
900 		}
901 		cq->tx_ring_phys_addr = mz->iova;
902 		cq->compl_ring = mz->addr;
903 		cq->mz = mz;
904 
905 		cpfl_tx_hairpin_complq_reset(cq);
906 		cpfl_vport->p2p_tx_complq = cq;
907 	}
908 
909 	txq->complq = cpfl_vport->p2p_tx_complq;
910 
911 	cpfl_vport->nb_p2p_txq++;
912 	txq->q_set = true;
913 	dev->data->tx_queues[queue_idx] = cpfl_txq;
914 
915 	return 0;
916 
917 err_cq_mz_rsv:
918 	rte_free(cq);
919 err_cq_alloc:
920 	cpfl_dma_zone_release(mz);
921 err_txq_mz_rsv:
922 	rte_free(cpfl_txq);
923 	return ret;
924 }
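
/*
 * Illustrative application-level sketch (not part of the driver; the port and
 * queue ids below are hypothetical): hairpin queue pairs are normally created
 * through the generic ethdev API, which lands in the two setup functions
 * above, e.g.:
 *
 *	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
 *
 *	conf.peers[0].port = peer_port_id;
 *	conf.peers[0].queue = peer_queue_id;
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, rx_qid, nb_desc, &conf);
 *	ret = rte_eth_tx_hairpin_queue_setup(port_id, tx_qid, nb_desc, &conf);
 *
 * With conf.manual_bind set, the application is expected to bind the peer
 * ports itself (e.g. with rte_eth_hairpin_bind()) after rte_eth_dev_start().
 */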
925 
926 int
927 cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
928 {
929 	struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
930 	struct virtchnl2_rxq_info rxq_info;
931 
932 	memset(&rxq_info, 0, sizeof(rxq_info));
933 
934 	rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
935 	rxq_info.queue_id = rx_bufq->queue_id;
936 	rxq_info.ring_len = rx_bufq->nb_rx_desc;
937 	rxq_info.dma_ring_addr = rx_bufq->rx_ring_phys_addr;
938 	rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
939 	rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
940 	rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
941 	rxq_info.data_buffer_size = rx_bufq->rx_buf_len;
942 	rxq_info.buffer_notif_stride = CPFL_RX_BUF_STRIDE;
943 
944 	return idpf_vc_rxq_config_by_info(&cpfl_vport->base, &rxq_info, 1);
945 }
946 
947 int
948 cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
949 {
950 	struct virtchnl2_rxq_info rxq_info;
951 	struct idpf_rx_queue *rxq = &cpfl_rxq->base;
952 
953 	memset(&rxq_info, 0, sizeof(rxq_info));
954 
955 	rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX;
956 	rxq_info.queue_id = rxq->queue_id;
957 	rxq_info.ring_len = rxq->nb_rx_desc;
958 	rxq_info.dma_ring_addr = rxq->rx_ring_phys_addr;
959 	rxq_info.rx_bufq1_id = rxq->bufq1->queue_id;
960 	rxq_info.max_pkt_size = vport->max_pkt_len;
961 	rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
962 	rxq_info.qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
963 
964 	rxq_info.data_buffer_size = rxq->rx_buf_len;
965 	rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
966 	rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
967 
968 	PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
969 		vport->vport_id, rxq_info.queue_id);
970 
971 	return idpf_vc_rxq_config_by_info(vport, &rxq_info, 1);
972 }
973 
974 int
975 cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
976 {
977 	struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
978 	struct virtchnl2_txq_info txq_info;
979 
980 	memset(&txq_info, 0, sizeof(txq_info));
981 
982 	txq_info.dma_ring_addr = tx_complq->tx_ring_phys_addr;
983 	txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
984 	txq_info.queue_id = tx_complq->queue_id;
985 	txq_info.ring_len = tx_complq->nb_tx_desc;
986 	txq_info.peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
987 	txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
988 	txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
989 
990 	return idpf_vc_txq_config_by_info(&cpfl_vport->base, &txq_info, 1);
991 }
992 
993 int
994 cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
995 {
996 	struct idpf_tx_queue *txq = &cpfl_txq->base;
997 	struct virtchnl2_txq_info txq_info;
998 
999 	memset(&txq_info, 0, sizeof(txq_info));
1000 
1001 	txq_info.dma_ring_addr = txq->tx_ring_phys_addr;
1002 	txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX;
1003 	txq_info.queue_id = txq->queue_id;
1004 	txq_info.ring_len = txq->nb_tx_desc;
1005 	txq_info.tx_compl_queue_id = txq->complq->queue_id;
1006 	txq_info.relative_queue_id = txq->queue_id;
1007 	txq_info.peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
1008 	txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
1009 	txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1010 
1011 	return idpf_vc_txq_config_by_info(vport, &txq_info, 1);
1012 }
1013 
1014 int
1015 cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
1016 {
1017 	struct idpf_vport *vport = &cpfl_vport->base;
1018 	uint32_t type;
1019 	int err, queue_id;
1020 
1021 	type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1022 	queue_id = cpfl_vport->p2p_tx_complq->queue_id;
1023 	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
1024 
1025 	return err;
1026 }
1027 
1028 int
1029 cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
1030 {
1031 	struct idpf_vport *vport = &cpfl_vport->base;
1032 	uint32_t type;
1033 	int err, queue_id;
1034 
1035 	type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
1036 	queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
1037 	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
1038 
1039 	return err;
1040 }
1041 
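/* Enable or disable one hairpin Rx or Tx data queue: translate the logical
 * queue id into the absolute HW queue id and issue the virtchnl request.
 */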
1042 int
1043 cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
1044 			       bool rx, bool on)
1045 {
1046 	struct idpf_vport *vport = &cpfl_vport->base;
1047 	uint32_t type;
1048 	int err, queue_id;
1049 
1050 	type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
1051 
1052 	if (type == VIRTCHNL2_QUEUE_TYPE_RX)
1053 		queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
1054 	else
1055 		queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
1056 	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
1057 
1058 	return err;
1061 }
1062 
1063 static int
1064 cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
1065 {
1066 	volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
1067 	struct rte_mbuf *mbuf = NULL;
1068 	uint64_t dma_addr;
1069 	uint16_t i;
1070 
1071 	for (i = 0; i < rxq->nb_rx_desc; i++) {
1072 		mbuf = rte_mbuf_raw_alloc(rxq->mp);
1073 		if (unlikely(!mbuf)) {
1074 			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
1075 			return -ENOMEM;
1076 		}
1077 
1078 		rte_mbuf_refcnt_set(mbuf, 1);
1079 		mbuf->next = NULL;
1080 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
1081 		mbuf->nb_segs = 1;
1082 		mbuf->port = rxq->port_id;
1083 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
1084 
1085 		rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
1086 		rxd->reserve0 = 0;
1087 		rxd->pkt_addr = dma_addr;
1088 	}
1089 
1090 	rxq->nb_rx_hold = 0;
1091 	/* The value written to the Rx buffer queue tail register must be a multiple of 8. */
1092 	rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
1093 
1094 	return 0;
1095 }
1096 
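/* Initialize an already configured Rx queue before starting it: set the max
 * packet length, register the timestamp mbuf, fill the ring(s) with mbufs and
 * program the tail register(s).
 */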
1097 int
1098 cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1099 {
1100 	struct cpfl_rx_queue *cpfl_rxq;
1101 	struct idpf_rx_queue *rxq;
1102 	uint16_t max_pkt_len;
1103 	uint32_t frame_size;
1104 	int err;
1105 
1106 	if (rx_queue_id >= dev->data->nb_rx_queues)
1107 		return -EINVAL;
1108 
1109 	cpfl_rxq = dev->data->rx_queues[rx_queue_id];
1110 	rxq = &cpfl_rxq->base;
1111 
1112 	if (rxq == NULL || !rxq->q_set) {
1113 		PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
1114 					rx_queue_id);
1115 		return -EINVAL;
1116 	}
1117 
1118 	frame_size = dev->data->mtu + CPFL_ETH_OVERHEAD;
1119 
1120 	max_pkt_len =
1121 	    RTE_MIN((uint32_t)CPFL_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
1122 		    frame_size);
1123 
1124 	rxq->max_pkt_len = max_pkt_len;
1125 	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
1126 	    frame_size > rxq->rx_buf_len)
1127 		dev->data->scattered_rx = 1;
1128 
1129 	err = idpf_qc_ts_mbuf_register(rxq);
1130 	if (err != 0) {
1131 		PMD_DRV_LOG(ERR, "Failed to register timestamp mbuf for RX queue %u",
1132 			    rx_queue_id);
1133 		return -EIO;
1134 	}
1135 
1136 	if (rxq->adapter->is_rx_singleq) {
1137 		/* Single queue */
1138 		err = idpf_qc_single_rxq_mbufs_alloc(rxq);
1139 		if (err != 0) {
1140 			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1141 			return err;
1142 		}
1143 
1144 		rte_wmb();
1145 
1146 		/* Init the RX tail register. */
1147 		IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1148 	} else {
1149 		/* Split queue */
1150 		if (cpfl_rxq->hairpin_info.hairpin_q) {
1151 			err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
1152 			if (err != 0) {
1153 				PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
1154 				return err;
1155 			}
1156 		} else {
1157 			err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
1158 			if (err != 0) {
1159 				PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
1160 				return err;
1161 			}
1162 			err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
1163 			if (err != 0) {
1164 				PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
1165 				return err;
1166 			}
1167 		}
1168 
1169 		rte_wmb();
1170 
1171 		/* Init the RX tail register. */
1172 		IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
1173 		if (rxq->bufq2)
1174 			IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
1175 	}
1176 
1177 	return err;
1178 }
1179 
1180 int
1181 cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1182 {
1183 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
1184 	struct idpf_vport *vport = &cpfl_vport->base;
1185 	struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
1186 	struct idpf_rx_queue *rxq = &cpfl_rxq->base;
1187 	int err = 0;
1188 
1189 	err = idpf_vc_rxq_config(vport, rxq);
1190 	if (err != 0) {
1191 		PMD_DRV_LOG(ERR, "Failed to configure Rx queue %u", rx_queue_id);
1192 		return err;
1193 	}
1194 
1195 	err = cpfl_rx_queue_init(dev, rx_queue_id);
1196 	if (err != 0) {
1197 		PMD_DRV_LOG(ERR, "Failed to init RX queue %u",
1198 			    rx_queue_id);
1199 		return err;
1200 	}
1201 
1202 	/* Ready to switch the queue on */
1203 	err = idpf_vc_queue_switch(vport, rx_queue_id, true, true,
1204 							VIRTCHNL2_QUEUE_TYPE_RX);
1205 	if (err != 0) {
1206 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1207 			    rx_queue_id);
1208 	} else {
1209 		rxq->q_started = true;
1210 		dev->data->rx_queue_state[rx_queue_id] =
1211 			RTE_ETH_QUEUE_STATE_STARTED;
1212 	}
1213 
1214 	return err;
1215 }
1216 
1217 int
1218 cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1219 {
1220 	struct cpfl_tx_queue *cpfl_txq;
1221 
1222 	if (tx_queue_id >= dev->data->nb_tx_queues)
1223 		return -EINVAL;
1224 
1225 	cpfl_txq = dev->data->tx_queues[tx_queue_id];
1226 
1227 	/* Init the TX tail register. */
1228 	IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
1229 
1230 	return 0;
1231 }
1232 
1233 int
1234 cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1235 {
1236 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
1237 	struct idpf_vport *vport = &cpfl_vport->base;
1238 	struct cpfl_tx_queue *cpfl_txq =
1239 		dev->data->tx_queues[tx_queue_id];
1240 	int err = 0;
1241 
1242 	err = idpf_vc_txq_config(vport, &cpfl_txq->base);
1243 	if (err != 0) {
1244 		PMD_DRV_LOG(ERR, "Failed to configure Tx queue %u", tx_queue_id);
1245 		return err;
1246 	}
1247 
1248 	err = cpfl_tx_queue_init(dev, tx_queue_id);
1249 	if (err != 0) {
1250 		PMD_DRV_LOG(ERR, "Failed to init TX queue %u",
1251 			    tx_queue_id);
1252 		return err;
1253 	}
1254 
1255 	/* Ready to switch the queue on */
1256 	err = idpf_vc_queue_switch(vport, tx_queue_id, false, true,
1257 							VIRTCHNL2_QUEUE_TYPE_TX);
1258 	if (err != 0) {
1259 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1260 			    tx_queue_id);
1261 	} else {
1262 		cpfl_txq->base.q_started = true;
1263 		dev->data->tx_queue_state[tx_queue_id] =
1264 			RTE_ETH_QUEUE_STATE_STARTED;
1265 	}
1266 
1267 	return err;
1268 }
1269 
1270 int
1271 cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1272 {
1273 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
1274 	struct idpf_vport *vport = &cpfl_vport->base;
1275 	struct cpfl_rx_queue *cpfl_rxq;
1276 	struct idpf_rx_queue *rxq;
1277 	int err;
1278 
1279 	if (rx_queue_id >= dev->data->nb_rx_queues)
1280 		return -EINVAL;
1281 
1282 	cpfl_rxq = dev->data->rx_queues[rx_queue_id];
1283 	if (cpfl_rxq->hairpin_info.hairpin_q)
1284 		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
1285 						     rx_queue_id - cpfl_vport->nb_data_txq,
1286 						     true, false);
1287 	else
1288 		err = idpf_vc_queue_switch(vport, rx_queue_id, true, false,
1289 								VIRTCHNL2_QUEUE_TYPE_RX);
1290 	if (err != 0) {
1291 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1292 			    rx_queue_id);
1293 		return err;
1294 	}
1295 
1296 	rxq = &cpfl_rxq->base;
1297 	rxq->q_started = false;
1298 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
1299 		rxq->ops->release_mbufs(rxq);
1300 		idpf_qc_single_rx_queue_reset(rxq);
1301 	} else {
1302 		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
1303 		if (rxq->bufq2)
1304 			rxq->bufq2->ops->release_mbufs(rxq->bufq2);
1305 		if (cpfl_rxq->hairpin_info.hairpin_q) {
1306 			cpfl_rx_hairpin_descq_reset(rxq);
1307 			cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
1308 		} else {
1309 			idpf_qc_split_rx_queue_reset(rxq);
1310 		}
1311 	}
1312 	if (!cpfl_rxq->hairpin_info.hairpin_q)
1313 		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1314 
1315 	return 0;
1316 }
1317 
1318 int
1319 cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1320 {
1321 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
1322 	struct idpf_vport *vport = &cpfl_vport->base;
1323 	struct cpfl_tx_queue *cpfl_txq;
1324 	struct idpf_tx_queue *txq;
1325 	int err;
1326 
1327 	if (tx_queue_id >= dev->data->nb_tx_queues)
1328 		return -EINVAL;
1329 
1330 	cpfl_txq = dev->data->tx_queues[tx_queue_id];
1331 
1332 	if (cpfl_txq->hairpin_info.hairpin_q)
1333 		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
1334 						     tx_queue_id - cpfl_vport->nb_data_txq,
1335 						     false, false);
1336 	else
1337 		err = idpf_vc_queue_switch(vport, tx_queue_id, false, false,
1338 								VIRTCHNL2_QUEUE_TYPE_TX);
1339 	if (err != 0) {
1340 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1341 			    tx_queue_id);
1342 		return err;
1343 	}
1344 
1345 	txq = &cpfl_txq->base;
1346 	txq->q_started = false;
1347 	txq->ops->release_mbufs(txq);
1348 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
1349 		idpf_qc_single_tx_queue_reset(txq);
1350 	} else {
1351 		if (cpfl_txq->hairpin_info.hairpin_q) {
1352 			cpfl_tx_hairpin_descq_reset(txq);
1353 			cpfl_tx_hairpin_complq_reset(txq->complq);
1354 		} else {
1355 			idpf_qc_split_tx_descq_reset(txq);
1356 			idpf_qc_split_tx_complq_reset(txq->complq);
1357 		}
1358 	}
1359 
1360 	if (!cpfl_txq->hairpin_info.hairpin_q)
1361 		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1362 
1363 	return 0;
1364 }
1365 
1366 void
1367 cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1368 {
1369 	cpfl_rx_queue_release(dev->data->rx_queues[qid]);
1370 }
1371 
1372 void
1373 cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1374 {
1375 	cpfl_tx_queue_release(dev->data->tx_queues[qid]);
1376 }
1377 
1378 void
1379 cpfl_stop_queues(struct rte_eth_dev *dev)
1380 {
1381 	struct cpfl_vport *cpfl_vport =
1382 		(struct cpfl_vport *)dev->data->dev_private;
1383 	struct cpfl_rx_queue *cpfl_rxq;
1384 	struct cpfl_tx_queue *cpfl_txq;
1385 	int i;
1386 
1387 	if (cpfl_vport->p2p_tx_complq != NULL) {
1388 		if (cpfl_switch_hairpin_complq(cpfl_vport, false) != 0)
1389 			PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq");
1390 	}
1391 
1392 	if (cpfl_vport->p2p_rx_bufq != NULL) {
1393 		if (cpfl_switch_hairpin_bufq(cpfl_vport, false) != 0)
1394 			PMD_DRV_LOG(ERR, "Failed to stop hairpin Rx bufq");
1395 	}
1396 
1397 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1398 		cpfl_rxq = dev->data->rx_queues[i];
1399 		if (cpfl_rxq == NULL)
1400 			continue;
1401 
1402 		if (cpfl_rx_queue_stop(dev, i) != 0)
1403 			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
1404 	}
1405 
1406 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1407 		cpfl_txq = dev->data->tx_queues[i];
1408 		if (cpfl_txq == NULL)
1409 			continue;
1410 
1411 		if (cpfl_tx_queue_stop(dev, i) != 0)
1412 			PMD_DRV_LOG(WARNING, "Failed to stop Tx queue %d", i);
1413 	}
1414 }
1415 
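/* Select the Rx burst function for the port based on the queue model
 * (single/split), vector support (including AVX512 when built in) and
 * scattered Rx.
 */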
1416 void
1417 cpfl_set_rx_function(struct rte_eth_dev *dev)
1418 {
1419 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
1420 	struct idpf_vport *vport = &cpfl_vport->base;
1421 #ifdef RTE_ARCH_X86
1422 	struct cpfl_rx_queue *cpfl_rxq;
1423 	int i;
1424 
1425 	if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
1426 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
1427 		vport->rx_vec_allowed = true;
1428 
1429 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
1430 #ifdef CC_AVX512_SUPPORT
1431 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
1432 			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
1433 			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ))
1434 				vport->rx_use_avx512 = true;
1435 #else
1436 		PMD_DRV_LOG(NOTICE,
1437 			    "AVX512 is not supported in build env");
1438 #endif /* CC_AVX512_SUPPORT */
1439 	} else {
1440 		vport->rx_vec_allowed = false;
1441 	}
1442 #endif /* RTE_ARCH_X86 */
1443 
1444 #ifdef RTE_ARCH_X86
1445 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
1446 		if (vport->rx_vec_allowed) {
1447 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1448 				cpfl_rxq = dev->data->rx_queues[i];
1449 				if (cpfl_rxq->hairpin_info.hairpin_q)
1450 					continue;
1451 				(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
1452 			}
1453 #ifdef CC_AVX512_SUPPORT
1454 			if (vport->rx_use_avx512) {
1455 				PMD_DRV_LOG(NOTICE,
1456 					    "Using Split AVX512 Vector Rx (port %d).",
1457 					    dev->data->port_id);
1458 				dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts_avx512;
1459 				return;
1460 			}
1461 #endif /* CC_AVX512_SUPPORT */
1462 		}
1463 		PMD_DRV_LOG(NOTICE,
1464 			    "Using Split Scalar Rx (port %d).",
1465 			    dev->data->port_id);
1466 		dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
1467 	} else {
1468 		if (vport->rx_vec_allowed) {
1469 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1470 				cpfl_rxq = dev->data->rx_queues[i];
1471 				(void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
1472 			}
1473 #ifdef CC_AVX512_SUPPORT
1474 			if (vport->rx_use_avx512) {
1475 				PMD_DRV_LOG(NOTICE,
1476 					    "Using Single AVX512 Vector Rx (port %d).",
1477 					    dev->data->port_id);
1478 				dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx512;
1479 				return;
1480 			}
1481 #endif /* CC_AVX512_SUPPORT */
1482 		}
1483 		if (dev->data->scattered_rx) {
1484 			PMD_DRV_LOG(NOTICE,
1485 				    "Using Single Scalar Scattered Rx (port %d).",
1486 				    dev->data->port_id);
1487 			dev->rx_pkt_burst = idpf_dp_singleq_recv_scatter_pkts;
1488 			return;
1489 		}
1490 		PMD_DRV_LOG(NOTICE,
1491 			    "Using Single Scalar Rx (port %d).",
1492 			    dev->data->port_id);
1493 		dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
1494 	}
1495 #else
1496 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
1497 		PMD_DRV_LOG(NOTICE,
1498 			    "Using Split Scalar Rx (port %d).",
1499 			    dev->data->port_id);
1500 		dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
1501 	} else {
1502 		if (dev->data->scattered_rx) {
1503 			PMD_DRV_LOG(NOTICE,
1504 				    "Using Single Scalar Scattered Rx (port %d).",
1505 				    dev->data->port_id);
1506 			dev->rx_pkt_burst = idpf_dp_singleq_recv_scatter_pkts;
1507 			return;
1508 		}
1509 		PMD_DRV_LOG(NOTICE,
1510 			    "Using Single Scalar Rx (port %d).",
1511 			    dev->data->port_id);
1512 		dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
1513 	}
1514 #endif /* RTE_ARCH_X86 */
1515 }
1516 
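/* Select the Tx burst and prepare functions for the port based on the queue
 * model (single/split) and vector support (including AVX512 when built in).
 */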
1517 void
1518 cpfl_set_tx_function(struct rte_eth_dev *dev)
1519 {
1520 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
1521 	struct idpf_vport *vport = &cpfl_vport->base;
1522 #ifdef RTE_ARCH_X86
1523 #ifdef CC_AVX512_SUPPORT
1524 	struct cpfl_tx_queue *cpfl_txq;
1525 	int i;
1526 #endif /* CC_AVX512_SUPPORT */
1527 
1528 	if (cpfl_tx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
1529 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
1530 		vport->tx_vec_allowed = true;
1531 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
1532 #ifdef CC_AVX512_SUPPORT
1533 		{
1534 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
1535 			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
1536 				vport->tx_use_avx512 = true;
1537 			if (vport->tx_use_avx512) {
1538 				for (i = 0; i < dev->data->nb_tx_queues; i++) {
1539 					cpfl_txq = dev->data->tx_queues[i];
1540 					idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
1541 				}
1542 			}
1543 		}
1544 #else
1545 		PMD_DRV_LOG(NOTICE,
1546 			    "AVX512 is not supported in build env");
1547 #endif /* CC_AVX512_SUPPORT */
1548 	} else {
1549 		vport->tx_vec_allowed = false;
1550 	}
1551 #endif /* RTE_ARCH_X86 */
1552 
1553 #ifdef RTE_ARCH_X86
1554 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
1555 		if (vport->tx_vec_allowed) {
1556 #ifdef CC_AVX512_SUPPORT
1557 			if (vport->tx_use_avx512) {
1558 				PMD_DRV_LOG(NOTICE,
1559 					    "Using Split AVX512 Vector Tx (port %d).",
1560 					    dev->data->port_id);
1561 				dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts_avx512;
1562 				dev->tx_pkt_prepare = idpf_dp_prep_pkts;
1563 				return;
1564 			}
1565 #endif /* CC_AVX512_SUPPORT */
1566 		}
1567 		PMD_DRV_LOG(NOTICE,
1568 			    "Using Split Scalar Tx (port %d).",
1569 			    dev->data->port_id);
1570 		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
1571 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
1572 	} else {
1573 		if (vport->tx_vec_allowed) {
1574 #ifdef CC_AVX512_SUPPORT
1575 			if (vport->tx_use_avx512) {
1576 				for (i = 0; i < dev->data->nb_tx_queues; i++) {
1577 					cpfl_txq = dev->data->tx_queues[i];
1578 					if (cpfl_txq == NULL)
1579 						continue;
1580 					idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
1581 				}
1582 				PMD_DRV_LOG(NOTICE,
1583 					    "Using Single AVX512 Vector Tx (port %d).",
1584 					    dev->data->port_id);
1585 				dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx512;
1586 				dev->tx_pkt_prepare = idpf_dp_prep_pkts;
1587 				return;
1588 			}
1589 #endif /* CC_AVX512_SUPPORT */
1590 		}
1591 		PMD_DRV_LOG(NOTICE,
1592 			    "Using Single Scalar Tx (port %d).",
1593 			    dev->data->port_id);
1594 		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
1595 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
1596 	}
1597 #else
1598 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
1599 		PMD_DRV_LOG(NOTICE,
1600 			    "Using Split Scalar Tx (port %d).",
1601 			    dev->data->port_id);
1602 		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
1603 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
1604 	} else {
1605 		PMD_DRV_LOG(NOTICE,
1606 			    "Using Single Scalar Tx (port %d).",
1607 			    dev->data->port_id);
1608 		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
1609 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
1610 	}
1611 #endif /* RTE_ARCH_X86 */
1612 }
1613