xref: /dpdk/drivers/event/octeontx/ssovf_worker.h (revision d430b921a4d7c5449f6e99b5ba40671b27d87adb)
1aaf4363eSJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause
2aaf4363eSJerin Jacob  * Copyright(c) 2017 Cavium, Inc
332ff2639SJerin Jacob  */
432ff2639SJerin Jacob 
5bd063651SFerruh Yigit #include <arpa/inet.h>
6bd063651SFerruh Yigit 
744a2cebbSShijith Thotton #ifndef _SSOVF_WORKER_H_
844a2cebbSShijith Thotton #define _SSOVF_WORKER_H_
944a2cebbSShijith Thotton 
1032ff2639SJerin Jacob #include <rte_common.h>
119a8269d5SJerin Jacob #include <rte_branch_prediction.h>
1232ff2639SJerin Jacob 
1389ee1e94SSantosh Shukla #include <octeontx_mbox.h>
1432ff2639SJerin Jacob 
15d0d65498SPavan Nikhilesh #include "ssovf_evdev.h"
16d0d65498SPavan Nikhilesh #include "octeontx_rxtx.h"
1744a2cebbSShijith Thotton #include "otx_cryptodev_ops.h"
18d0d65498SPavan Nikhilesh 
19cf55f04aSHarman Kalra /* Alignment */
20cf55f04aSHarman Kalra #define OCCTX_ALIGN  128
21cf55f04aSHarman Kalra 
22cf55f04aSHarman Kalra /* Fastpath lookup */
23cf55f04aSHarman Kalra #define OCCTX_FASTPATH_LOOKUP_MEM	"octeontx_fastpath_lookup_mem"
24cf55f04aSHarman Kalra 
25cf55f04aSHarman Kalra /* WQE's ERRCODE + ERRLEV (11 bits) */
26cf55f04aSHarman Kalra #define ERRCODE_ERRLEN_WIDTH		11
27cf55f04aSHarman Kalra #define ERR_ARRAY_SZ			((BIT(ERRCODE_ERRLEN_WIDTH)) *\
28cf55f04aSHarman Kalra 					sizeof(uint32_t))
29cf55f04aSHarman Kalra 
30cf55f04aSHarman Kalra #define LOOKUP_ARRAY_SZ			(ERR_ARRAY_SZ)
31cf55f04aSHarman Kalra 
32cf55f04aSHarman Kalra #define OCCTX_EC_IP4_NOT		0x41
33cf55f04aSHarman Kalra #define OCCTX_EC_IP4_CSUM		0x42
34cf55f04aSHarman Kalra #define OCCTX_EC_L4_CSUM		0x62
35cf55f04aSHarman Kalra 
/* WQE ERRLEV values: the layer at which an Rx error was detected.
 * Together with the error code these form the 11-bit index
 * (ERRCODE_ERRLEN_WIDTH) into the fastpath ol_flags lookup table.
 * NOTE(review): RE presumably means "receive/pre-parse" and LA-LG the
 * PKI parser layers A-G -- confirm against the OCTEON TX HRM.
 */
enum OCCTX_ERRLEV_E {
	OCCTX_ERRLEV_RE = 0,
	OCCTX_ERRLEV_LA = 1,
	OCCTX_ERRLEV_LB = 2,
	OCCTX_ERRLEV_LC = 3,
	OCCTX_ERRLEV_LD = 4,
	OCCTX_ERRLEV_LE = 5,
	OCCTX_ERRLEV_LF = 6,
	OCCTX_ERRLEV_LG = 7,
};
46cf55f04aSHarman Kalra 
/* SSO tag/synchronization types. The 2-bit values match what
 * ssows_get_work() extracts into ws->cur_tt and what the swtag
 * operations encode into bits [33:32] of their request word.
 */
enum {
	SSO_SYNC_ORDERED,
	SSO_SYNC_ATOMIC,
	SSO_SYNC_UNTAGGED,
	SSO_SYNC_EMPTY
};
5332ff2639SJerin Jacob 
5432ff2639SJerin Jacob /* SSO Operations */
5532ff2639SJerin Jacob 
56cf55f04aSHarman Kalra static __rte_always_inline uint32_t
ssovf_octeontx_rx_olflags_get(const void * const lookup_mem,const uint64_t in)57cf55f04aSHarman Kalra ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
58cf55f04aSHarman Kalra {
59cf55f04aSHarman Kalra 	const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;
60cf55f04aSHarman Kalra 
61cf55f04aSHarman Kalra 	return ol_flags[(in & 0x7ff)];
62cf55f04aSHarman Kalra }
63cf55f04aSHarman Kalra 
/*
 * Walk the PKI buffer-link list attached to a multi-segment WQE and
 * chain the remaining segment mbufs onto @mbuf, setting data_off and
 * data_len for each. The caller has already initialized the head
 * segment (pkt_len, first data_len, nb_segs).
 *
 * @param wqe  Work-queue entry describing the received packet.
 * @param mbuf Head mbuf of the chain.
 */
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
			       struct rte_mbuf *mbuf)
{
	octtx_pki_buflink_t *buflink;
	rte_iova_t *iova_list;
	uint8_t nb_segs;
	/* Bytes not covered by the head segment (w5.size). */
	uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;

	nb_segs = wqe->s.w0.bufs;

	/* The buflink header sits immediately before the packet data. */
	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));

	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		/* Recover the segment's mbuf from the buffer address.
		 * NOTE(review): the subtraction is in units of
		 * struct rte_mbuf (pointer arithmetic), with
		 * OCTTX_PACKET_LATER_SKIP expressed in 128 B units --
		 * verify against octeontx_rxtx.h.
		 */
		mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			      - (OCTTX_PACKET_LATER_SKIP / 128);
		mbuf = mbuf->next;

		mbuf->data_off = sizeof(octtx_pki_buflink_t);

		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
		if (nb_segs == 1)
			/* Last segment gets whatever is left over. */
			mbuf->data_len = bytes_left;
		else
			mbuf->data_len = buflink->w0.s.size;

		bytes_left = bytes_left - buflink->w0.s.size;
		/* Advance to the next buffer-link in the list. */
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);

	}
}
97844d302dSHarman Kalra 
/*
 * Convert a received work-queue entry into an rte_mbuf.
 *
 * @param work       WQE pointer as delivered by SSO get_work.
 * @param port_info  Encoded channel info; high and low nibble index
 *                   rte_octeontx_pchan_map[][] to derive mbuf->port.
 * @param flag       Compile-time Rx offload flags (OCCTX_RX_*_F).
 * @param lookup_mem Error-code -> ol_flags lookup table, used only
 *                   when OCCTX_RX_OFFLOAD_CSUM_F is set.
 * @return mbuf describing the packet carried by the WQE.
 */
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
			  const uint16_t flag, const void *lookup_mem)
{
	struct rte_mbuf *mbuf;
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

	/* Get mbuf from wqe: the mbuf sits OCTTX_PACKET_WQE_SKIP bytes
	 * before the WQE.
	 */
	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	rte_prefetch_non_temporal(mbuf);
	/* Packet type from the three HW parse-layer type fields. */
	mbuf->packet_type =
		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
	mbuf->ol_flags = 0;
	mbuf->pkt_len = wqe->s.w1.len;

	if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
		/* Low 11 bits of word2 (ERRLEV+ERRCODE) select ol_flags. */
		mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
							       wqe->w[2]);

	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
		mbuf->nb_segs = wqe->s.w0.bufs;
		mbuf->data_len = wqe->s.w5.size;
		ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
	} else {
		mbuf->nb_segs = 1;
		mbuf->data_len = mbuf->pkt_len;
	}

	if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
		/* w2.vv: VLAN present; TCI is 2 bytes past w4.vlptr. */
		if (likely(wqe->s.w2.vv)) {
			mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
			mbuf->vlan_tci =
				ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
					mbuf->data_off + wqe->s.w4.vlptr + 2)));
		}
	}

	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
	rte_mbuf_refcnt_set(mbuf, 1);

	return mbuf;
}
141d0d65498SPavan Nikhilesh 
/*
 * Free the mbuf chain behind an unconsumed WQE (used by
 * ssows_get_work() when the returned tag marks the event invalid).
 * Walks the PKI buffer-link list the same way as
 * ssovf_octeontx_wqe_xtract_mseg() and frees every segment.
 *
 * @param work WQE pointer to release.
 */
static __rte_always_inline void
ssovf_octeontx_wqe_free(uint64_t work)
{
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
	uint8_t nb_segs = wqe->s.w0.bufs;
	octtx_pki_buflink_t *buflink;
	struct rte_mbuf *mbuf, *head;
	rte_iova_t *iova_list;

	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));
	head = mbuf;
	/* Free every trailing segment individually (next pointers were
	 * never set up, so clear them before rte_pktmbuf_free).
	 */
	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			- (OCTTX_PACKET_LATER_SKIP / 128);

		mbuf->next = NULL;
		rte_pktmbuf_free(mbuf);
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
	}
	/* Finally release the head segment. */
	rte_pktmbuf_free(head);
}
1664a212166SPavan Nikhilesh 
/*
 * Dequeue one event from the SSO workslot.
 *
 * @param ws   Per-port workslot.
 * @param ev   [out] rte_event assembled from the get_work response.
 * @param flag Compile-time Rx offload flags, forwarded to the
 *             WQE-to-mbuf conversion for ethdev events.
 * @return 1 if an event was returned, 0 otherwise.
 */
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
{
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;

	/* Paired load: word0 = tag/type/group, word1 = event pointer. */
	ssovf_load_pair(get_work0, get_work1, ws->getwork);

	/* Bits [43:32]: 2-bit tag type + group. Track both in SW. */
	sched_type_queue = (get_work0 >> 32) & 0xfff;
	ws->cur_tt = sched_type_queue & 0x3;
	ws->cur_grp = sched_type_queue >> 2;
	/* Reposition type/queue into the rte_event bit layout. */
	sched_type_queue = sched_type_queue << 38;
	ev->event = sched_type_queue | (get_work0 & 0xffffffff);

	if (get_work1) {
		if (ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
			/* Bits [26:20] of the event carry port_info. */
			uint16_t port = (ev->event >> 20) & 0x7F;

			ev->sub_event_type = 0;
			ev->mbuf = ssovf_octeontx_wqe_to_pkt(
				get_work1, port, flag, ws->lookup_mem);
		} else if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV) {
			get_work1 = otx_crypto_adapter_dequeue(get_work1);
			ev->u64 = get_work1;
		} else {
			/* NOTE(review): an all-ones tag presumably marks
			 * an invalid/dropped entry -- free its WQE and
			 * report no work.
			 */
			if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
				ssovf_octeontx_wqe_free(get_work1);
				return 0;
			}
			ev->u64 = get_work1;
		}
	}

	return !!get_work1;
}
20232ff2639SJerin Jacob 
203c0583d98SJerin Jacob static __rte_always_inline void
ssows_add_work(struct ssows * ws,const uint64_t event_ptr,const uint32_t tag,const uint8_t new_tt,const uint8_t grp)20432ff2639SJerin Jacob ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
20532ff2639SJerin Jacob 			const uint8_t new_tt, const uint8_t grp)
20632ff2639SJerin Jacob {
20732ff2639SJerin Jacob 	uint64_t add_work0;
20832ff2639SJerin Jacob 
20932ff2639SJerin Jacob 	add_work0 = tag | ((uint64_t)(new_tt) << 32);
21032ff2639SJerin Jacob 	ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
21132ff2639SJerin Jacob }
21232ff2639SJerin Jacob 
213c0583d98SJerin Jacob static __rte_always_inline void
ssows_swtag_full(struct ssows * ws,const uint64_t event_ptr,const uint32_t tag,const uint8_t new_tt,const uint8_t grp)21432ff2639SJerin Jacob ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
21532ff2639SJerin Jacob 			const uint8_t new_tt, const uint8_t grp)
21632ff2639SJerin Jacob {
21732ff2639SJerin Jacob 	uint64_t swtag_full0;
21832ff2639SJerin Jacob 
21932ff2639SJerin Jacob 	swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
22032ff2639SJerin Jacob 				((uint64_t)grp << 34);
22132ff2639SJerin Jacob 	ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
22232ff2639SJerin Jacob 				SSOW_VHWS_OP_SWTAG_FULL0));
22332ff2639SJerin Jacob }
22432ff2639SJerin Jacob 
225c0583d98SJerin Jacob static __rte_always_inline void
ssows_swtag_desched(struct ssows * ws,uint32_t tag,uint8_t new_tt,uint8_t grp)22632ff2639SJerin Jacob ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
22732ff2639SJerin Jacob {
22832ff2639SJerin Jacob 	uint64_t val;
22932ff2639SJerin Jacob 
23032ff2639SJerin Jacob 	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
23132ff2639SJerin Jacob 	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
23232ff2639SJerin Jacob }
23332ff2639SJerin Jacob 
234c0583d98SJerin Jacob static __rte_always_inline void
ssows_swtag_norm(struct ssows * ws,uint32_t tag,uint8_t new_tt)23532ff2639SJerin Jacob ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
23632ff2639SJerin Jacob {
23732ff2639SJerin Jacob 	uint64_t val;
23832ff2639SJerin Jacob 
23932ff2639SJerin Jacob 	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
24032ff2639SJerin Jacob 	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
24132ff2639SJerin Jacob }
24232ff2639SJerin Jacob 
/* Switch the current event to UNTAGGED and mirror the new tag type in
 * the workslot's software state (ws->cur_tt).
 */
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}
24932ff2639SJerin Jacob 
/* Update the work-queue pointer associated with the current event for
 * group @grp (SSOW UPD_WQP_GRP operation).
 */
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
	ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
				SSOW_VHWS_OP_UPD_WQP_GRP0));
}
25632ff2639SJerin Jacob 
/* Deschedule the event currently held by this workslot. */
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}
26232ff2639SJerin Jacob 
263c0583d98SJerin Jacob static __rte_always_inline void
ssows_swtag_wait(struct ssows * ws)26432ff2639SJerin Jacob ssows_swtag_wait(struct ssows *ws)
26532ff2639SJerin Jacob {
26632ff2639SJerin Jacob 	/* Wait for the SWTAG/SWTAG_FULL operation */
26732ff2639SJerin Jacob 	while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
26832ff2639SJerin Jacob 	;
26932ff2639SJerin Jacob }
27044a2cebbSShijith Thotton 
27144a2cebbSShijith Thotton static __rte_always_inline void
ssows_head_wait(struct ssows * ws)27244a2cebbSShijith Thotton ssows_head_wait(struct ssows *ws)
27344a2cebbSShijith Thotton {
27444a2cebbSShijith Thotton 	while (!(ssovf_read64(ws->base + SSOW_VHWS_TAG) & (1ULL << 35)))
27544a2cebbSShijith Thotton 		;
27644a2cebbSShijith Thotton }
27744a2cebbSShijith Thotton #endif /* _SSOVF_WORKER_H_ */
278