/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"

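/*
 * Queue a new event to the SSO by issuing an add-work operation with the
 * event's tag, sched type and destination group.
 */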
static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm             untag
	 * SSO_SYNC_ATOMIC         norm           norm             untag
	 * SSO_SYNC_UNTAGGED       full           full             NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED)
			ssows_swtag_full(ws, ev->u64, tag, new_tt, grp);
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}

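/*
 * Private event type used when forwarding to a different group; taken from
 * the top of the event-type space to stay clear of the standard
 * RTE_EVENT_TYPE_* values.
 */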
#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
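		/*
		 * newtag layout: custom event type in bits [31:28],
		 * destination group in bits [27:20], OR'ed with the
		 * event's original tag.
		 */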
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed; use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has changed for group-based work pipelining; use a
	 * deschedule/add_work operation to transfer the event to the
	 * new group/core.
	 */
		ssows_fwd_group(ws, ev, grp);
}

static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}

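/*
 * R() expands to one set of dequeue handlers (plain, burst, timeout and
 * timeout-burst variants) per Rx offload combination;
 * SSO_RX_ADPTR_ENQ_FASTPATH_FUNC below instantiates the whole family.
 */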
#define R(name, f2, f1, f0, flags)					     \
static uint16_t __rte_noinline __rte_hot				     \
ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks) \
{									     \
	struct ssows *ws = port;					     \
									     \
	RTE_SET_USED(timeout_ticks);					     \
									     \
	if (ws->swtag_req) {						     \
		ws->swtag_req = 0;					     \
		ssows_swtag_wait(ws);					     \
		return 1;						     \
	} else {							     \
		return ssows_get_work(ws, ev, flags);			     \
	}								     \
}									     \
									     \
static uint16_t __rte_hot						     \
ssows_deq_burst_ ##name(void *port, struct rte_event ev[],		     \
			uint16_t nb_events, uint64_t timeout_ticks)	     \
{									     \
	RTE_SET_USED(nb_events);					     \
									     \
	return ssows_deq_ ##name(port, ev, timeout_ticks);		     \
}									     \
									     \
static uint16_t __rte_hot						     \
ssows_deq_timeout_ ##name(void *port, struct rte_event *ev,		     \
			  uint64_t timeout_ticks)			     \
{									     \
	struct ssows *ws = port;					     \
	uint64_t iter;							     \
	uint16_t ret = 1;						     \
									     \
	if (ws->swtag_req) {						     \
		ws->swtag_req = 0;					     \
		ssows_swtag_wait(ws);					     \
	} else {							     \
		ret = ssows_get_work(ws, ev, flags);			     \
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)  \
			ret = ssows_get_work(ws, ev, flags);		     \
	}								     \
	return ret;							     \
}									     \
									     \
static uint16_t __rte_hot						     \
ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],	     \
				uint16_t nb_events, uint64_t timeout_ticks) \
{									     \
	RTE_SET_USED(nb_events);					     \
									     \
	return ssows_deq_timeout_ ##name(port, ev, timeout_ticks);	     \
}

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R

uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	RTE_SET_USED(nb_events);

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_wmb();
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}
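
/*
 * Usage sketch (illustrative only): these handlers are not called
 * directly; ssovf_fastpath_fns_set() installs them into the eventdev and
 * applications reach them through the public API, e.g. to inject one new
 * atomic event:
 *
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.queue_id = 0,
 *		.u64 = (uintptr_t)obj,
 *	};
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *
 * dev_id, port_id and obj are placeholders for the application's own
 * handles.
 */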

uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t i;
	struct ssows *ws = port;

	rte_smp_wmb();
	for (i = 0; i < nb_events; i++)
		ssows_new_event(ws, &ev[i]);

	return nb_events;
}

uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	RTE_SET_USED(nb_events);

	ssows_forward_event(ws, ev);

	return 1;
}

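/*
 * Drain every event still queued to the given group, handing each one to
 * the supplied callback (if any) until both the admission-queue and
 * CQ/DS counters read zero.
 */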
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
				ssows_handle_event_t fn, void *arg)
{
	uint32_t reg_off;
	struct rte_event ev;
	uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;
	uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
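		/* (the two 13-bit count fields sit at bit offsets 16 and 32) */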
		cq_ds_cnt &= 0x1FFF1FFF0000;

		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

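		/*
		 * get_work0 bits [43:32] carry the sched type (low 2 bits)
		 * and group; reposition them at bit 38 to rebuild the
		 * rte_event word.
		 */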
		sched_type_queue = (get_work0 >> 32) & 0xfff;
		ws->cur_tt = sched_type_queue & 0x3;
		ws->cur_grp = sched_type_queue >> 2;
		sched_type_queue = sched_type_queue << 38;
		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
					(ev.event >> 20) & 0x7F,
					OCCTX_RX_OFFLOAD_NONE |
					OCCTX_RX_MULTI_SEG_F,
					ws->lookup_mem);
		else
			ev.u64 = get_work1;

		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
	}
}

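/*
 * Return the work slot to an idle state: deschedule a pending tag switch,
 * or untag the currently held tag, so no ORDERED/ATOMIC context is leaked.
 */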
void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}

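/*
 * Tx adapter enqueue: first switch the event to ATOMIC so the core holds
 * the tag while transmitting, then send the mbuf through the octeontx
 * Tx path variant selected by "flag".
 */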
static __rte_always_inline uint16_t
__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
			       uint16_t nb_events, uint64_t *cmd,
			       const uint16_t flag)
{
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mbuf *m;
	struct rte_eth_dev *ethdev;
	struct ssows *ws = port;
	struct octeontx_txq *txq;

	RTE_SET_USED(nb_events);
	switch (ev->sched_type) {
	case SSO_SYNC_ORDERED:
		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_UNTAGGED:
		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
				ev->queue_id);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_ATOMIC:
		rte_io_wmb();
		break;
	}

	m = ev[0].mbuf;
	port_id = m->port;
	queue_id = rte_event_eth_tx_adapter_txq_get(m);
	ethdev = &rte_eth_devices[port_id];
	txq = ethdev->data->tx_queues[queue_id];

	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
}

#define T(name, f3, f2, f1, f0, sz, flags)				     \
static uint16_t __rte_noinline __rte_hot				     \
sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[],    \
				  uint16_t nb_events)			     \
{									     \
	uint64_t cmd[sz];						     \
	return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd,     \
					      flags);			     \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

static uint16_t __rte_hot
ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
			    uint16_t nb_events)
{
	RTE_SET_USED(nb_events);

	return otx_crypto_adapter_enqueue(port, ev->event_ptr);
}

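/*
 * Install the fast-path handlers, picking the dequeue and Tx-adapter
 * variants that match the device's enabled offload flags.
 */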
void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;

	dev->ca_enqueue = ssow_crypto_adapter_enqueue;

	const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
#define T(name, f3, f2, f1, f0, sz, flags)				\
	[f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,

		SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

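	/* Index by (NOFF, outer csum, inner csum, multi-seg) flag bits */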
	dev->txa_enqueue = ssow_txa_enqueue
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];

	dev->txa_enqueue_same_dest = dev->txa_enqueue;

	/* Assign dequeue function pointers */
	const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags)					\
	[f2][f1][f0] = ssows_deq_burst_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	dev->dequeue_burst = ssow_deq_burst
		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

	if (edev->is_timeout_deq) {
	const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags)					\
	[f2][f1][f0] = ssows_deq_timeout_burst_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
		};

	dev->dequeue_burst = ssow_deq_timeout_burst
		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
	}
}

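/*
 * Precompute the Rx ol_flags lookup table: the index packs the parser
 * error level (bits 10:8) and error code (bits 7:0), and each entry holds
 * the matching mbuf checksum flags.
 */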
static void
octeontx_create_rx_ol_flags_array(void *mem)
{
	uint16_t idx, errcode, errlev;
	uint32_t val, *ol_flags;

	/* Skip ptype array memory */
	ol_flags = (uint32_t *)mem;

	for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
		errcode = idx & 0xff;
		errlev = (idx & 0x700) >> 8;

		val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
		val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
		val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;

		switch (errlev) {
		case OCCTX_ERRLEV_RE:
			if (errcode) {
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			} else {
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			}
			break;
		case OCCTX_ERRLEV_LC:
			if (errcode == OCCTX_EC_IP4_CSUM) {
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
			} else {
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			}
			break;
		case OCCTX_ERRLEV_LD:
			/* Check if parsed packet is neither IPv4 nor IPv6 */
			if (errcode == OCCTX_EC_IP4_NOT)
				break;
			val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			if (errcode == OCCTX_EC_L4_CSUM)
				val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			break;
		case OCCTX_ERRLEV_LE:
			if (errcode == OCCTX_EC_IP4_CSUM)
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			break;
		case OCCTX_ERRLEV_LF:
			/* Check if parsed packet is neither IPv4 nor IPv6 */
			if (errcode == OCCTX_EC_IP4_NOT)
				break;
			val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			if (errcode == OCCTX_EC_L4_CSUM)
				val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			break;
		}

		ol_flags[idx] = val;
	}
}

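/*
 * Return the shared fast-path lookup memzone, creating and populating it
 * on first use so every caller reuses the same ol_flags table.
 */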
void *
octeontx_fastpath_lookup_mem_get(void)
{
	const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
	const struct rte_memzone *mz;
	void *mem;

	mz = rte_memzone_lookup(name);
	if (mz != NULL)
		return mz->addr;

	/* First request: reserve and populate the memzone */
	mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
					 SOCKET_ID_ANY, 0, OCCTX_ALIGN);
	if (mz != NULL) {
		mem = mz->addr;
		/* Form the rx ol_flags based on errcode */
		octeontx_create_rx_ol_flags_array(mem);
		return mem;
	}
	return NULL;
}