/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__

#include <ethdev_driver.h>

#define OFFLOAD_FLAGS			\
	uint16_t rx_offload_flags;	\
	uint16_t tx_offload_flags
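/*
 * OFFLOAD_FLAGS embeds the Rx/Tx offload flag fields into whichever driver
 * structure expands it, so the fast-path helpers below can test the enabled
 * offloads cheaply on the object they are handed.
 */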

#define BIT(nr) (1UL << (nr))

#define OCCTX_RX_OFFLOAD_NONE		(0)
#define OCCTX_RX_MULTI_SEG_F		BIT(0)
#define OCCTX_RX_OFFLOAD_CSUM_F		BIT(1)
#define OCCTX_RX_VLAN_FLTR_F		BIT(2)

#define OCCTX_TX_OFFLOAD_NONE		(0)
#define OCCTX_TX_MULTI_SEG_F		BIT(0)
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F	BIT(1)
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F	BIT(2)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F	BIT(3)
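/*
 * The OCCTX_RX_*_F / OCCTX_TX_*_F bits act as compile-time parameters: each
 * combination selects one specialised Rx/Tx fast-path variant, enumerated by
 * the OCCTX_RX_FASTPATH_MODES / OCCTX_TX_FASTPATH_MODES tables at the end of
 * this header.
 */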

/* Packet type table */
#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST

/* octeontx send header sub descriptor structure */
union octeontx_send_hdr_w0_u {
	uint64_t u;
	struct {
		uint64_t total : 16;
		uint64_t markptr : 8;
		uint64_t l3ptr : 8;
		uint64_t l4ptr : 8;
		uint64_t ii : 1;
		uint64_t shp_dis : 1;
		uint64_t ckle : 1;
		uint64_t cklf : 2;
		uint64_t ckl3 : 1;
		uint64_t ckl4 : 2;
		uint64_t p : 1;
		uint64_t format : 7;
		uint64_t tstamp : 1;
		uint64_t tso_eom : 1;
		uint64_t df : 1;
		uint64_t tso : 1;
		uint64_t n2 : 1;
		uint64_t scntn1 : 3;
	};
};

union octeontx_send_hdr_w1_u {
	uint64_t u;
	struct {
		uint64_t tso_mss : 14;
		uint64_t shp_ra : 2;
		uint64_t tso_sb : 8;
		uint64_t leptr : 8;
		uint64_t lfptr : 8;
		uint64_t shp_chg : 9;
		uint64_t tso_fn : 7;
		uint64_t l2len : 8;
	};
};

struct octeontx_send_hdr_s {
	union octeontx_send_hdr_w0_u w0;
	union octeontx_send_hdr_w1_u w1;
};
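/*
 * w0 and w1 form the PKO SEND_HDR sub-descriptor, i.e. the first two 64-bit
 * words written into the command buffer by the xmit prepare helpers below.
 * octeontx_tx_checksum_offload() fills the checksum pointer/enable fields,
 * and bit 58 of w0 (df) controls whether PKO frees the packet buffer.
 */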

static const alignas(RTE_CACHE_LINE_SIZE) uint32_t
	ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,

};
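/*
 * The table above is indexed by the PKI-reported layer types of a received
 * packet (see the LC_*, LE_* and LF_* enumerators) and yields the matching
 * rte_mbuf packet_type value; RTE_PTYPE_UNKNOWN entries mean no further
 * classification for that layer.
 */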


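/*
 * Detach an indirect mbuf: restore its own buffer fields, return it to its
 * pool and drop one reference on the direct mbuf backing it.  *m_tofree is
 * set to that direct mbuf, whose pool owns the packet data handed to PKO.
 * Returns 0 when the last reference on the direct mbuf was dropped (the
 * buffer may be freed by hardware) and 1 when other references remain.
 */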
static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	/* The real data is in the direct buffer; report it to the caller */
	*m_tofree = md;
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now the indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}

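/*
 * Decide whether the hardware may free the buffer for this segment.  The
 * return value is written into SEND_HDR[DF] / SG_DESC[I] by the callers:
 * 0 lets PKO free the buffer, 1 keeps it because references remain.
 * *m_tofree always points at the mbuf owning the data (the direct mbuf
 * for indirect packets).
 */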
static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m, m_tofree);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m, m_tofree);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf still has other references, so it must not be freed */
	return 1;
}

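/*
 * Fill the SEND_HDR checksum pointers and enable bits from the mbuf offload
 * flags.  The outer (l3ptr/l4ptr, ckl3/ckl4) fields describe the outer
 * headers only when a tunnel flag is present; otherwise they describe the
 * packet's own L3/L4 headers.
 */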
static __rte_always_inline void
octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
			     struct rte_mbuf *m)
{
	struct octeontx_send_hdr_s *send_hdr =
				(struct octeontx_send_hdr_s *)cmd_buf;
	uint64_t ol_flags = m->ol_flags;

	/* PKO Checksum L4 Algorithm Enumeration
	 * 0x0 - No checksum
	 * 0x1 - UDP L4 checksum
	 * 0x2 - TCP L4 checksum
	 * 0x3 - SCTP L4 checksum
	 */
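	/* Branch-free translation of RTE_MBUF_F_TX_L4_MASK (bits 52-53 of
	 * ol_flags) into the PKO code above: XOR-ing with a specific checksum
	 * flag and testing those two bits yields 1 only on an exact match, so
	 * the sum below evaluates to 0 (none), 1 (UDP), 2 (TCP) or 3 (SCTP).
	 */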
	const uint8_t csum = (!(((ol_flags ^ RTE_MBUF_F_TX_UDP_CKSUM) >> 52) & 0x3) +
		(!(((ol_flags ^ RTE_MBUF_F_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
		(!(((ol_flags ^ RTE_MBUF_F_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));

	const uint8_t is_tunnel_parsed = (!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP) ||
		!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) ||
		!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) ||
		!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE) ||
		!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE) ||
		!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IP) ||
		!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP));

	const uint8_t csum_outer = (!!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
		!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_UDP));
	const uint8_t outer_l2_len = m->outer_l2_len;
	const uint8_t l2_len = m->l2_len;

	if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
	    (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
		if (is_tunnel_parsed) {
			/* Outer L3 */
			send_hdr->w0.l3ptr = outer_l2_len;
			send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
			/* Set ckl3 for PKO to calculate the IPv4 header checksum */
			send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

			/* Outer L4 */
			send_hdr->w0.ckl4 = csum_outer;

			/* Inner L3 */
			send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
			send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
			/* Set ckle for PKO to calculate the inner IPv4 header
			 * checksum.
			 */
			send_hdr->w0.ckle = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

			/* Inner L4 */
			send_hdr->w0.cklf = csum;
		} else {
			/* Inner L3 */
			send_hdr->w0.l3ptr = l2_len;
			send_hdr->w0.l4ptr = l2_len + m->l3_len;
			/* Set ckl3 for PKO to calculate the IPv4 header checksum */
			send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

			/* Inner L4 */
			send_hdr->w0.ckl4 = csum;
		}
	} else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
		/* Outer L3 */
		send_hdr->w0.l3ptr = outer_l2_len;
		send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
		/* Set ckl3 for PKO to calculate the IPv4 header checksum */
		send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

		/* Outer L4 */
		send_hdr->w0.ckl4 = csum_outer;
	} else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
		/* Inner L3 */
		send_hdr->w0.l3ptr = l2_len;
		send_hdr->w0.l4ptr = l2_len + m->l3_len;
		/* Set ckl3 for PKO to calculate the IPv4 header checksum */
		send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

		/* Inner L4 */
		send_hdr->w0.ckl4 = csum;
	}
}

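/*
 * Build the PKO command for a single-segment packet: SEND_HDR (two words)
 * followed by one SEND_BUFLINK sub-descriptor (two words).  Returns the
 * number of 64-bit words written to cmd_buf.
 */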
static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			const uint16_t flag)
{
	uint16_t gaura_id, nb_desc = 0;
	struct rte_mbuf *m_tofree;
	rte_iova_t iova;
	uint16_t data_len;

	m_tofree = tx_pkt;

	data_len = tx_pkt->data_len;
	iova = rte_mbuf_data_iova(tx_pkt);

	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	/* Enable Tx checksum offload */
	if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
	    (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
		octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

	/* The SEND_HDR[DF] bit controls whether the buffer is freed,
	 * as SG_DESC[I] and SEND_HDR[II] are clear.
	 */
	if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
		cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt, &m_tofree) << 58);

	/* Mark the mempool object as "put" since it is freed by PKO */
	if (!(cmd_buf[0] & (1ULL << 58)))
		RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool, (void **)&m_tofree,
					  1, 0);
	/* Get the gaura Id */
	gaura_id =
		octeontx_fpa_bufpool_gaura((uintptr_t)m_tofree->pool->pool_id);

	/* Setup PKO_SEND_BUFLINK_S */
	cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
			     PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
			     PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
			     data_len;
	cmd_buf[nb_desc++] = iova;

	return nb_desc;
}

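/*
 * Multi-segment variant: SEND_HDR followed by one SEND_GATHER sub-descriptor
 * (two words) per segment.  Returns the number of 64-bit words written to
 * cmd_buf.
 */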
static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			     const uint16_t flag)
{
	uint16_t nb_segs, nb_desc = 0;
	uint16_t gaura_id;
	struct rte_mbuf *m_next = NULL, *m_tofree;
	rte_iova_t iova;
	uint16_t data_len;

	nb_segs = tx_pkt->nb_segs;
	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	/* Enable Tx checksum offload */
	if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
	    (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
		octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

	do {
		m_next = tx_pkt->next;
		/* Fetch the Tx parameters up front; octeontx_prefree_seg()
		 * may change them.
		 */
		m_tofree = tx_pkt;
		data_len = tx_pkt->data_len;
		iova = rte_mbuf_data_iova(tx_pkt);

		/* Setup PKO_SEND_GATHER_S */
		cmd_buf[nb_desc] = 0;

		/* The SG_DESC[I] bit controls whether the buffer is freed,
		 * as SEND_HDR[DF] and SEND_HDR[II] are clear.
		 */
		if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
			cmd_buf[nb_desc] |=
				(octeontx_prefree_seg(tx_pkt, &m_tofree) << 57);
		}

		/* Handle the case where mbufs belong to different pools,
		 * e.g. after fragmentation.
		 */
		gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)
						      m_tofree->pool->pool_id);

		/* Setup PKO_SEND_GATHER_S */
		cmd_buf[nb_desc] |= PKO_SEND_GATHER_SUBDC |
				    PKO_SEND_GATHER_LDTYPE(0x1ull) |
				    PKO_SEND_GATHER_GAUAR((long)gaura_id) |
				    data_len;

		/* Mark the mempool object as "put" since it is freed by
		 * PKO.
		 */
		if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
			tx_pkt->next = NULL;
			RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool,
						  (void **)&m_tofree, 1, 0);
		}
		nb_desc++;

		cmd_buf[nb_desc++] = iova;

		nb_segs--;
		tx_pkt = m_next;
	} while (nb_segs);

	return nb_desc;
}

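/*
 * Common transmit loop shared by the fast-path variants.  It stops early
 * when the PKO flow-control status word goes negative, otherwise builds
 * each command with the prepare helper selected by 'flags' and issues it
 * with an LMTST store.  'flags' is intended to be a compile-time constant
 * in every caller so the branches below fold away.
 */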
static __rte_always_inline uint16_t
__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts, uint64_t *cmd_buf,
		     const uint16_t flags)
{
	struct octeontx_txq *txq = tx_queue;
	octeontx_dq_t *dq = &txq->dq;
	uint16_t count = 0, nb_desc;

	rte_io_wmb();

	while (count < nb_pkts) {
		if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
			break;

		if (flags & OCCTX_TX_MULTI_SEG_F) {
			nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
							       cmd_buf, flags);
		} else {
			nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
							  cmd_buf, flags);
		}

		octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
				   nb_desc);

		count++;
	}
	return count;
}

uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#define L3L4CSUM_F	OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F	OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F		OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F		OCCTX_TX_MULTI_SEG_F

/* [NOFF] [OL3OL4CSUM_F] [L3L4CSUM_F] [MULTI_SEG] */
#define OCCTX_TX_FASTPATH_MODES						\
T(no_offload,				0, 0, 0, 0,	4,		\
	OCCTX_TX_OFFLOAD_NONE)						\
T(mseg,					0, 0, 0, 1,	14,		\
	MULT_F)								\
T(l3l4csum,				0, 0, 1, 0,	4,		\
	L3L4CSUM_F)							\
T(l3l4csum_mseg,			0, 0, 1, 1,	14,		\
	L3L4CSUM_F | MULT_F)						\
T(ol3ol4csum,				0, 1, 0, 0,	4,		\
	OL3OL4CSUM_F)							\
T(ol3l4csum_mseg,			0, 1, 0, 1,	14,		\
	OL3OL4CSUM_F | MULT_F)						\
T(ol3l4csum_l3l4csum,			0, 1, 1, 0,	4,		\
	OL3OL4CSUM_F | L3L4CSUM_F)					\
T(ol3l4csum_l3l4csum_mseg,		0, 1, 1, 1,	14,		\
	OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)				\
T(noff,					1, 0, 0, 0,	4,		\
	NOFF_F)								\
T(noff_mseg,				1, 0, 0, 1,	14,		\
	NOFF_F | MULT_F)						\
T(noff_l3l4csum,			1, 0, 1, 0,	4,		\
	NOFF_F | L3L4CSUM_F)						\
T(noff_l3l4csum_mseg,			1, 0, 1, 1,	14,		\
	NOFF_F | L3L4CSUM_F | MULT_F)					\
T(noff_ol3ol4csum,			1, 1, 0, 0,	4,		\
	NOFF_F | OL3OL4CSUM_F)						\
T(noff_ol3ol4csum_mseg,			1, 1, 0, 1,	14,		\
	NOFF_F | OL3OL4CSUM_F | MULT_F)					\
T(noff_ol3ol4csum_l3l4csum,		1, 1, 1, 0,	4,		\
	NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)				\
T(noff_ol3ol4csum_l3l4csum_mseg,	1, 1, 1, 1,	14,		\
	NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)
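/*
 * Each T() row names one specialised Tx burst variant together with the
 * constant flag set it is built with; the sixth argument appears to match
 * the command sizes used above (4 words single-segment, 14 words
 * multi-segment).  A minimal sketch of how such an X-macro table is
 * typically expanded (illustrative only; the real declarations and
 * definitions live in the driver's .c/.h files and the exact function
 * naming there may differ):
 *
 *	#define T(name, f3, f2, f1, f0, sz, flags)			\
 *	uint16_t octeontx_xmit_pkts_ ## name(void *tx_queue,		\
 *			struct rte_mbuf **tx_pkts, uint16_t pkts);
 *
 *	OCCTX_TX_FASTPATH_MODES
 *	#undef T
 */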

/* RX offload macros */
#define VLAN_FLTR_F	OCCTX_RX_VLAN_FLTR_F
#define CSUM_F		OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F	OCCTX_RX_MULTI_SEG_F

/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES						\
R(no_offload,		0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)		\
R(mseg,			0, 0, 1,  MULT_RX_F)				\
R(csum,			0, 1, 0,  CSUM_F)				\
R(csum_mseg,		0, 1, 1,  CSUM_F | MULT_RX_F)			\
R(vlan,			1, 0, 0,  VLAN_FLTR_F)				\
R(vlan_mseg,		1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)		\
R(vlan_csum,		1, 1, 0,  VLAN_FLTR_F | CSUM_F)			\
R(vlan_csum_mseg,	1, 1, 1,  CSUM_F | VLAN_FLTR_F | MULT_RX_F)
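/*
 * As with T() above, each R() row is expected to expand (in the driver's
 * .c/.h files) into one specialised Rx burst function built with the listed
 * flag combination.
 */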

#endif /* __OCTEONTX_RXTX_H__ */