/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _SSOVF_WORKER_H_
#define _SSOVF_WORKER_H_

#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <octeontx_mbox.h>

#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"
#include "otx_cryptodev_ops.h"

/* Alignment */
#define OCCTX_ALIGN  128

/* Fastpath lookup */
#define OCCTX_FASTPATH_LOOKUP_MEM	"octeontx_fastpath_lookup_mem"

/* WQE's ERRCODE + ERRLEV (11 bits) */
#define ERRCODE_ERRLEN_WIDTH		11
#define ERR_ARRAY_SZ			((BIT(ERRCODE_ERRLEN_WIDTH)) *\
					sizeof(uint32_t))

#define LOOKUP_ARRAY_SZ			(ERR_ARRAY_SZ)

#define OCCTX_EC_IP4_NOT		0x41
#define OCCTX_EC_IP4_CSUM		0x42
#define OCCTX_EC_L4_CSUM		0x62

enum OCCTX_ERRLEV_E {
	OCCTX_ERRLEV_RE = 0,
	OCCTX_ERRLEV_LA = 1,
	OCCTX_ERRLEV_LB = 2,
	OCCTX_ERRLEV_LC = 3,
	OCCTX_ERRLEV_LD = 4,
	OCCTX_ERRLEV_LE = 5,
	OCCTX_ERRLEV_LF = 6,
	OCCTX_ERRLEV_LG = 7,
};

enum {
	SSO_SYNC_ORDERED,
	SSO_SYNC_ATOMIC,
	SSO_SYNC_UNTAGGED,
	SSO_SYNC_EMPTY
};

/* SSO Operations */

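/*
 * Look up the Rx ol_flags for a received packet: the low 11 bits of WQE
 * word 2 (ERRLEV + ERRCODE, see ERRCODE_ERRLEN_WIDTH) index a prebuilt
 * per-error-code table (OCCTX_FASTPATH_LOOKUP_MEM).
 */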
static __rte_always_inline uint32_t
ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
{
	const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;

	return ol_flags[(in & 0x7ff)];
}

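/*
 * Chain the remaining segments of a multi-segment packet onto the head
 * mbuf by walking the PKI buffer-link list. Each link yields the next
 * buffer, from which the mbuf is recovered by fixed-offset pointer
 * arithmetic; the last segment receives whatever bytes are left of the
 * total packet length.
 */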
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
			       struct rte_mbuf *mbuf)
{
	octtx_pki_buflink_t *buflink;
	rte_iova_t *iova_list;
	uint8_t nb_segs;
	uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;

	nb_segs = wqe->s.w0.bufs;

	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));

	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			      - (OCTTX_PACKET_LATER_SKIP / 128);
		mbuf = mbuf->next;

		mbuf->data_off = sizeof(octtx_pki_buflink_t);

		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
		if (nb_segs == 1)
			mbuf->data_len = bytes_left;
		else
			mbuf->data_len = buflink->w0.s.size;

		bytes_left = bytes_left - buflink->w0.s.size;
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
	}
}

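/*
 * Convert a received WQE into an rte_mbuf. The mbuf sits at a fixed
 * offset (OCTTX_PACKET_WQE_SKIP) in front of the WQE, so it is recovered
 * by pointer arithmetic. Packet type, lengths, checksum ol_flags,
 * multi-segment chaining and VLAN TCI are filled in as selected by the
 * compile-time 'flag' offload mask.
 */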
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
			  const uint16_t flag, const void *lookup_mem)
{
	struct rte_mbuf *mbuf;
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

	/* Get mbuf from wqe */
	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	rte_prefetch_non_temporal(mbuf);
	mbuf->packet_type =
		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
	mbuf->ol_flags = 0;
	mbuf->pkt_len = wqe->s.w1.len;

	if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
		mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
							       wqe->w[2]);

	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
		mbuf->nb_segs = wqe->s.w0.bufs;
		mbuf->data_len = wqe->s.w5.size;
		ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
	} else {
		mbuf->nb_segs = 1;
		mbuf->data_len = mbuf->pkt_len;
	}

	if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
		if (likely(wqe->s.w2.vv)) {
			mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
			mbuf->vlan_tci =
				ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
					mbuf->data_off + wqe->s.w4.vlptr + 2)));
		}
	}

	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
	rte_mbuf_refcnt_set(mbuf, 1);

	return mbuf;
}

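/*
 * Free every mbuf of a (possibly multi-segment) WQE without handing the
 * packet to the application, e.g. when get_work returns an invalid
 * event. Segments are released individually, the head mbuf last.
 */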
static __rte_always_inline void
ssovf_octeontx_wqe_free(uint64_t work)
{
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
	uint8_t nb_segs = wqe->s.w0.bufs;
	octtx_pki_buflink_t *buflink;
	struct rte_mbuf *mbuf, *head;
	rte_iova_t *iova_list;

	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));
	head = mbuf;
	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			- (OCTTX_PACKET_LATER_SKIP / 128);

		mbuf->next = NULL;
		rte_pktmbuf_free(mbuf);
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
	}
	rte_pktmbuf_free(head);
}

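/*
 * Dequeue one event from the workslot's GETWORK address. The first word
 * carries tag, tag type and group, which are decoded into the rte_event
 * and the workslot state; the second word is the work pointer. Ethdev
 * work is converted to an mbuf and cryptodev work to a crypto op before
 * returning. Returns 1 if valid work was received, 0 otherwise.
 */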
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
{
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;

	ssovf_load_pair(get_work0, get_work1, ws->getwork);

	sched_type_queue = (get_work0 >> 32) & 0xfff;
	ws->cur_tt = sched_type_queue & 0x3;
	ws->cur_grp = sched_type_queue >> 2;
	sched_type_queue = sched_type_queue << 38;
	ev->event = sched_type_queue | (get_work0 & 0xffffffff);

	if (get_work1) {
		if (ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
			uint16_t port = (ev->event >> 20) & 0x7F;

			ev->sub_event_type = 0;
			ev->mbuf = ssovf_octeontx_wqe_to_pkt(
				get_work1, port, flag, ws->lookup_mem);
		} else if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV) {
			get_work1 = otx_crypto_adapter_dequeue(get_work1);
			ev->u64 = get_work1;
		} else {
			if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
				ssovf_octeontx_wqe_free(get_work1);
				return 0;
			}
			ev->u64 = get_work1;
		}
	}

	return !!get_work1;
}

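/*
 * Enqueue a new event: store the tag|tag-type word and the event pointer
 * as a pair to the add-work address of the target group (ws->grps[grp]).
 */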
static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t add_work0;

	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}

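/*
 * SWTAG_FULL: switch the current work to a new tag, tag type and group,
 * and update its work pointer in the same operation.
 */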
static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t swtag_full0;

	swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
				((uint64_t)grp << 34);
	ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
				SSOW_VHWS_OP_SWTAG_FULL0));
}

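/*
 * SWTAG_DESCHED: switch to a new tag and tag type, then deschedule the
 * current work to the given group for later rescheduling.
 */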
static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}

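/*
 * SWTAG_NORM: switch the current work to a new tag and tag type within
 * the current group.
 */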
static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}

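/*
 * SWTAG_UNTAG: drop the current tag, moving the work to the UNTAGGED
 * state (per the SSO model, this releases any atomic/ordered context
 * held by the workslot).
 */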
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}

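/*
 * UPD_WQP_GRP: update the work-queue pointer and group of the work
 * currently held by this workslot.
 */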
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
	ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
				SSOW_VHWS_OP_UPD_WQP_GRP0));
}

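/* DESCHED: deschedule the work currently held by this workslot. */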
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}

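/*
 * Spin until the pending SWTAG/SWTAG_FULL operation completes, i.e.
 * until the switch-tag-pending (SWTP) register reads zero.
 */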
static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
	/* Wait for the SWTAG/SWTAG_FULL operation */
	while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
		;
}

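/*
 * Spin until this workslot reaches the head of its tag chain, indicated
 * by bit 35 of the TAG register, i.e. until it is safe to act on
 * ordered work.
 */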
static __rte_always_inline void
ssows_head_wait(struct ssows *ws)
{
	while (!(ssovf_read64(ws->base + SSOW_VHWS_TAG) & (1ULL << 35)))
		;
}
#endif /* _SSOVF_WORKER_H_ */