/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#ifndef _CPFL_RXTX_VEC_COMMON_H_
#define _CPFL_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "cpfl_ethdev.h"
#include "cpfl_rxtx.h"

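/* Return values used by the Rx/Tx vector path selection helpers below. */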
#define CPFL_SCALAR_PATH		0
#define CPFL_VECTOR_PATH		1
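
/* Rx offloads that the vector Rx path does not support. */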
#define CPFL_RX_NO_VECTOR_FLAGS (		\
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	\
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |	\
		RTE_ETH_RX_OFFLOAD_TIMESTAMP)
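
/* Tx offloads that the vector Tx path does not support. */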
#define CPFL_TX_NO_VECTOR_FLAGS (		\
		RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |	\
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |	\
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |	\
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

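/*
 * Per-queue Rx check: the vector path requires a power-of-two ring size,
 * a free threshold of at least IDPF_VPMD_RX_MAX_BURST that evenly divides
 * the ring size, and none of the unsupported Rx offloads.
 */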
static inline int
cpfl_rx_vec_queue_default(struct idpf_rx_queue *rxq)
{
	if (rxq == NULL)
		return CPFL_SCALAR_PATH;

	if (rte_is_power_of_2(rxq->nb_rx_desc) == 0)
		return CPFL_SCALAR_PATH;

	if (rxq->rx_free_thresh < IDPF_VPMD_RX_MAX_BURST)
		return CPFL_SCALAR_PATH;

	if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0)
		return CPFL_SCALAR_PATH;

	if ((rxq->offloads & CPFL_RX_NO_VECTOR_FLAGS) != 0)
		return CPFL_SCALAR_PATH;

	return CPFL_VECTOR_PATH;
}

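/*
 * Per-queue Tx check: the vector path requires an RS threshold of at
 * least IDPF_VPMD_TX_MAX_BURST that is a multiple of four, and none of
 * the unsupported Tx offloads.
 */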
static inline int
cpfl_tx_vec_queue_default(struct idpf_tx_queue *txq)
{
	if (txq == NULL)
		return CPFL_SCALAR_PATH;

	if (txq->rs_thresh < IDPF_VPMD_TX_MAX_BURST ||
	    (txq->rs_thresh & 3) != 0)
		return CPFL_SCALAR_PATH;

	if ((txq->offloads & CPFL_TX_NO_VECTOR_FLAGS) != 0)
		return CPFL_SCALAR_PATH;

	return CPFL_VECTOR_PATH;
}

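/*
 * Split-queue Rx check: buffers posted to the second buffer queue must be
 * able to hold a maximum-sized packet in a single buffer.
 */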
static inline int
cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
{
	if (rxq->bufq2->rx_buf_len < rxq->max_pkt_len)
		return CPFL_SCALAR_PATH;

	return CPFL_VECTOR_PATH;
}

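/*
 * Device-level Rx check: scattered Rx forces the scalar path. Otherwise,
 * each queue must pass the per-queue check; in the split queue model,
 * hairpin queues are skipped and the remaining queues must also pass the
 * split-queue check.
 */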
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
	struct idpf_vport *vport = &cpfl_vport->base;
	struct cpfl_rx_queue *cpfl_rxq;
	int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;

	if (dev->data->scattered_rx)
		return CPFL_SCALAR_PATH;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		cpfl_rxq = dev->data->rx_queues[i];
		default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
		if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
			if (cpfl_rxq->hairpin_info.hairpin_q)
				continue;
			splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
			ret = splitq_ret && default_ret;
		} else {
			ret = default_ret;
		}
		if (ret == CPFL_SCALAR_PATH)
			return CPFL_SCALAR_PATH;
	}

	return CPFL_VECTOR_PATH;
}

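/*
 * Device-level Tx check: every non-hairpin Tx queue must pass the
 * per-queue check for the vector path to be selected.
 */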
static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct cpfl_tx_queue *cpfl_txq;
	int ret = CPFL_SCALAR_PATH;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		cpfl_txq = dev->data->tx_queues[i];
		if (cpfl_txq->hairpin_info.hairpin_q)
			continue;
		ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
		if (ret == CPFL_SCALAR_PATH)
			return CPFL_SCALAR_PATH;
	}

	return CPFL_VECTOR_PATH;
}

#endif /* _CPFL_RXTX_VEC_COMMON_H_ */