/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <octeontx_mbox.h>

#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"

/* Alignment */
#define OCCTX_ALIGN  128

/* Fastpath lookup */
#define OCCTX_FASTPATH_LOOKUP_MEM	"octeontx_fastpath_lookup_mem"

/* WQE's ERRCODE + ERRLEV (11 bits) */
#define ERRCODE_ERRLEN_WIDTH		11
#define ERR_ARRAY_SZ			((BIT(ERRCODE_ERRLEN_WIDTH)) *\
					sizeof(uint32_t))

#define LOOKUP_ARRAY_SZ			(ERR_ARRAY_SZ)

#define OCCTX_EC_IP4_NOT		0x41
#define OCCTX_EC_IP4_CSUM		0x42
#define OCCTX_EC_L4_CSUM		0x62

enum OCCTX_ERRLEV_E {
	OCCTX_ERRLEV_RE = 0,
	OCCTX_ERRLEV_LA = 1,
	OCCTX_ERRLEV_LB = 2,
	OCCTX_ERRLEV_LC = 3,
	OCCTX_ERRLEV_LD = 4,
	OCCTX_ERRLEV_LE = 5,
	OCCTX_ERRLEV_LF = 6,
	OCCTX_ERRLEV_LG = 7,
};

enum {
	SSO_SYNC_ORDERED,
	SSO_SYNC_ATOMIC,
	SSO_SYNC_UNTAGGED,
	SSO_SYNC_EMPTY
};

/* SSO Operations */

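/*
 * Translate the 11-bit ERRLEV + ERRCODE field of a WQE into Rx ol_flags
 * using the fastpath lookup table (OCCTX_FASTPATH_LOOKUP_MEM).
 */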
static __rte_always_inline uint32_t
ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
{
	const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;

	return ol_flags[(in & 0x7ff)];
}

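/*
 * Walk the PKI buffer-link chain of a multi-segment WQE and fill in the
 * chained mbufs: next pointers, data_off and per-segment data_len.
 */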
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
			       struct rte_mbuf *mbuf)
{
	octtx_pki_buflink_t *buflink;
	rte_iova_t *iova_list;
	uint8_t nb_segs;
	uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;

	nb_segs = wqe->s.w0.bufs;

	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));

	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			      - (OCTTX_PACKET_LATER_SKIP / 128);
		mbuf = mbuf->next;

		mbuf->data_off = sizeof(octtx_pki_buflink_t);

		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
		if (nb_segs == 1)
			mbuf->data_len = bytes_left;
		else
			mbuf->data_len = buflink->w0.s.size;

		bytes_left = bytes_left - buflink->w0.s.size;
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
	}
}

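/*
 * Convert a PKI work queue entry into the rte_mbuf that precedes it in the
 * same buffer, filling packet type, lengths, offload flags, segment chain,
 * VLAN TCI and input port as requested by the offload bits in 'flag'.
 */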
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
			  const uint16_t flag, const void *lookup_mem)
{
	struct rte_mbuf *mbuf;
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

	/* Get mbuf from wqe */
	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	rte_prefetch_non_temporal(mbuf);
	mbuf->packet_type =
		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
	mbuf->ol_flags = 0;
	mbuf->pkt_len = wqe->s.w1.len;

	if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
		mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
							       wqe->w[2]);

	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
		mbuf->nb_segs = wqe->s.w0.bufs;
		mbuf->data_len = wqe->s.w5.size;
		ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
	} else {
		mbuf->nb_segs = 1;
		mbuf->data_len = mbuf->pkt_len;
	}

	if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
		if (likely(wqe->s.w2.vv)) {
			mbuf->ol_flags |= PKT_RX_VLAN;
			mbuf->vlan_tci =
				ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
					mbuf->data_off + wqe->s.w4.vlptr + 2)));
		}
	}

	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
	rte_mbuf_refcnt_set(mbuf, 1);

	return mbuf;
}

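/*
 * Free every mbuf segment attached to a WQE that is being dropped rather
 * than delivered to the application.
 */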
static __rte_always_inline void
ssovf_octeontx_wqe_free(uint64_t work)
{
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
	uint8_t nb_segs = wqe->s.w0.bufs;
	octtx_pki_buflink_t *buflink;
	struct rte_mbuf *mbuf, *head;
	rte_iova_t *iova_list;

	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));
	head = mbuf;
	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			- (OCTTX_PACKET_LATER_SKIP / 128);

		mbuf->next = NULL;
		rte_pktmbuf_free(mbuf);
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
	}
	rte_pktmbuf_free(head);
}

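/*
 * Issue a GET_WORK on the workslot, decode the schedule type and group into
 * the rte_event and convert ethdev WQEs to mbufs. Returns 1 if an event was
 * dequeued, 0 otherwise.
 */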
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
{
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;

	ssovf_load_pair(get_work0, get_work1, ws->getwork);

	/* sched_type_queue: bits [1:0] = HW tag type, [11:2] = SSO group */
	sched_type_queue = (get_work0 >> 32) & 0xfff;
	ws->cur_tt = sched_type_queue & 0x3;
	ws->cur_grp = sched_type_queue >> 2;
	/* Position tag type/group at the rte_event sched_type/queue_id bits */
	sched_type_queue = sched_type_queue << 38;
	ev->event = sched_type_queue | (get_work0 & 0xffffffff);

	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
				(ev->event >> 20) & 0x7F, flag, ws->lookup_mem);
	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
		ssovf_octeontx_wqe_free(get_work1);
		return 0;
	} else {
		ev->u64 = get_work1;
	}

	return !!get_work1;
}

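/* Add a new work item (event_ptr, tag, tag type) to SSO group 'grp' */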
static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t add_work0;

	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}

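/*
 * SWTAG_FULL: switch the tag, tag type and group of the current event and
 * update its work queue entry pointer.
 */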
static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t swtag_full0;

	swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
				((uint64_t)grp << 34);
	ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
				SSOW_VHWS_OP_SWTAG_FULL0));
}

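/* SWTAG_DESCHED: switch the tag and deschedule the current event to 'grp' */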
static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}

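/* SWTAG_NORM: switch the tag and tag type of the current event */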
static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}

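/* SWTAG_UNTAG: move the current event to the UNTAGGED state */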
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}

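/* Update the work queue entry pointer and group of the event held by this
 * workslot.
 */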
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
	ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
				SSOW_VHWS_OP_UPD_WQP_GRP0));
}

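/* Deschedule the event currently held by this workslot */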
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}

static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
	/* Wait for the SWTAG/SWTAG_FULL operation */
	while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
	;
}