/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Marvell.
 */
#ifndef __CNXK_ETHDEV_DP_H__
#define __CNXK_ETHDEV_DP_H__

#include <rte_security_driver.h>
#include <rte_mbuf.h>

/* If PTP is enabled, an additional SEND MEM DESC is required, which
 * takes 2 words; hence a maximum of 7 IOVA addresses is possible.
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define CNXK_NIX_TX_NB_SEG_MAX 7
#else
#define CNXK_NIX_TX_NB_SEG_MAX 9
#endif

#define CNXK_NIX_TX_MSEG_SG_DWORDS                                             \
	((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) +                 \
	 CNXK_NIX_TX_NB_SEG_MAX)
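
/* A worked example of the arithmetic above: one NIX SG subdescriptor
 * (one dword) covers up to three segment pointers, and each segment
 * pointer is itself one dword. Without PTP: ceil(9 / 3) + 9 = 12 dwords;
 * with PTP: ceil(7 / 3) + 7 = 10 dwords.
 */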

/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff
#define CNXK_NIX_TIMESYNC_RX_OFFSET 8

#define PTYPE_NON_TUNNEL_WIDTH	  16
#define PTYPE_TUNNEL_WIDTH	  12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ	  BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ                                                         \
	((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))

/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
#define ERRCODE_ERRLEN_WIDTH 12
#define ERR_ARRAY_SZ	     ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))

#define SA_BASE_TBL_SZ	(RTE_MAX_ETHPORTS * sizeof(uintptr_t))
#define MEMPOOL_TBL_SZ	(RTE_MAX_ETHPORTS * sizeof(uintptr_t))
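
/* Together these sizes describe consecutive regions of the shared lookup
 * memory, in the order consumed by the offset helpers at the end of this
 * file:
 *
 *   +0                               : ptype tables (PTYPE_ARRAY_SZ)
 *   +PTYPE_ARRAY_SZ                  : Rx error code table (ERR_ARRAY_SZ)
 *   +PTYPE_ARRAY_SZ + ERR_ARRAY_SZ   : per-port SA base table (SA_BASE_TBL_SZ)
 *   + ... + SA_BASE_TBL_SZ           : per-port meta pool table (MEMPOOL_TBL_SZ)
 */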

#define CNXK_NIX_UDP_TUN_BITMASK                                               \
	((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) |                        \
	 (1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))
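
/* RTE_MBUF_F_TX_TUNNEL_* is a 4-bit tunnel type field starting at bit 45
 * of ol_flags, so shifting a flag right by 45 yields its tunnel type index
 * (VXLAN = 1, GENEVE = 4); the bitmask above has one bit set per UDP based
 * tunnel type. Illustrative use:
 *
 *   udp_tun = !!(CNXK_NIX_UDP_TUN_BITMASK &
 *		  (1ull << ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)));
 */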

/* Subtype from inline outbound error event */
#define CNXK_ETHDEV_SEC_OUTB_EV_SUB 0xFFUL

/* SPI is carried in the lower 20 bits of the tag */
#define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL

#define CNXK_NIX_PFC_CHAN_COUNT 16

#define CNXK_TM_MARK_VLAN_DEI BIT_ULL(0)
#define CNXK_TM_MARK_IP_DSCP  BIT_ULL(1)
#define CNXK_TM_MARK_IP_ECN   BIT_ULL(2)

#define CNXK_TM_MARK_MASK                                                      \
	(CNXK_TM_MARK_VLAN_DEI | CNXK_TM_MARK_IP_DSCP | CNXK_TM_MARK_IP_ECN)

#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)

/* Tx completion queue state (used when Tx completion events are enabled) */
struct cnxk_eth_txq_comp {
	uintptr_t desc_base;	     /* CQ descriptor ring base address */
	uintptr_t cq_door;	     /* CQ doorbell address */
	int64_t *cq_status;	     /* CQ status memory */
	uint64_t wdata;		     /* Data used for CQ status reads */
	uint32_t head;		     /* Current CQ head index */
	uint32_t qmask;		     /* CQ ring size mask */
	uint32_t nb_desc_mask;	     /* Tx descriptor count mask */
	uint32_t available;	     /* CQEs available to process */
	uint32_t sqe_id;	     /* Running SQE buffer identifier */
	bool ena;		     /* Completion handling enabled */
	struct rte_mbuf **ptr;	     /* Mbufs awaiting Tx completion */
	rte_spinlock_t ext_buf_lock; /* Protects external buffer tracking */
};

/* Per-port PTP timestamping state */
struct cnxk_timesync_info {
	uint8_t rx_ready;	    /* Non-zero when rx_tstamp holds a fresh value */
	uint64_t rx_tstamp;	    /* Latest Rx timestamp */
	uint64_t rx_tstamp_dynflag; /* Registered Rx timestamp dynflag bit */
	int tstamp_dynfield_offset; /* Offset of mbuf timestamp dynfield */
	rte_iova_t tx_tstamp_iova;  /* IOVA of Tx timestamp memory */
	uint64_t *tx_tstamp;	    /* Virtual address of Tx timestamp memory */
} __plt_cache_aligned;

/* Inline security packet injection configuration */
struct cnxk_ethdev_inj_cfg {
	uintptr_t lmt_base; /* LMT line base address */
	uint64_t io_addr;   /* LMTST I/O address */
	uint64_t sa_base;   /* Outbound SA base address */
	uint64_t cmd_w0;    /* Precomputed command word 0 */
} __plt_cache_aligned;

/* Inlines */
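
/* Detach an indirect mbuf, mirroring rte_pktmbuf_detach() from the mbuf
 * API. When @aura is non-NULL it is also filled with the NPA aura of the
 * direct mbuf's pool. Returns 0 when the direct mbuf's refcount dropped
 * to zero (its buffer may be released back to the pool), 1 when it is
 * still referenced.
 */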
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m, uint64_t *aura)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	if (aura)
		*aura = roc_npa_aura_handle_to_aura(md->pool->pool_id);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}

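/* Tx fast path helper to decide whether a segment's buffer can be freed
 * back to its pool (e.g. by NIX via the aura) at transmit time: returns 0
 * when the buffer may be released, 1 when the mbuf is still referenced
 * and must not be freed.
 */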
static __rte_always_inline uint64_t
cnxk_nix_prefree_seg(struct rte_mbuf *m, uint64_t *aura)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m, aura);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m, aura);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf refcount is greater than 1, so it need not be freed */
	return 1;
}

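/* Fetch a pointer to the mbuf's registered timestamp dynamic field */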
static inline rte_mbuf_timestamp_t *
cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf,
			    struct cnxk_timesync_info *info)
{
	return RTE_MBUF_DYNFIELD(mbuf, info->tstamp_dynfield_offset,
				 rte_mbuf_timestamp_t *);
}

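/* The two helpers below fetch per-port entries (inline SA base and inline
 * meta pool) from the lookup memory regions sized above.
 */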
static __rte_always_inline uintptr_t
cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem)
{
	uintptr_t sa_base_tbl;

	sa_base_tbl = (uintptr_t)lookup_mem;
	sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
	return *((const uintptr_t *)sa_base_tbl + port);
}

static __rte_always_inline uintptr_t
cnxk_nix_inl_metapool_get(uint16_t port, const void *lookup_mem)
{
	uintptr_t metapool_tbl;

	metapool_tbl = (uintptr_t)lookup_mem;
	metapool_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;
	return *((const uintptr_t *)metapool_tbl + port);
}

#endif /* __CNXK_ETHDEV_DP_H__ */