/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"

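/*
 * Submit a brand new event to the SSO: the tag, schedule type and destination
 * group come straight from the application-supplied rte_event and are handed
 * to the hardware through ssows_add_work().
 */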
static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

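/*
 * Forward an event within the same group using a tag switch. The flavour of
 * switch (normal, full or untag) depends on the current and the new schedule
 * type, as summarised in the table inside the function; swtag_req is set so
 * that the next dequeue waits for the switch to complete.
 */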
static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm             untag
	 * SSO_SYNC_ATOMIC         norm           norm             untag
	 * SSO_SYNC_UNTAGGED       full           full             NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED) {
			ssows_swtag_full(ws, ev->u64, tag,
				new_tt, grp);
		}
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}

#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

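/*
 * Forward an event to a different group. If the current context is ordered,
 * first switch to an atomic tag (built from OCT_EVENT_TYPE_GRP_FWD, the new
 * group and the original tag) and wait for it, presumably so that the ordered
 * flow keeps its ordering across the group change, before the event is
 * re-injected into the new group with ssows_add_work().
 */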
static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has changed for group-based work pipelining; use the
	 * deschedule/add_work operation to transfer the event to the
	 * new group/core.
	 */
		ssows_fwd_group(ws, ev, grp);
}

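/*
 * Release the current tag (RTE_EVENT_OP_RELEASE): drop the atomic/ordered
 * context held by this worker by switching to untagged, unless the worker is
 * already untagged.
 */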
static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}

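/*
 * Dequeue fast-path template. For every Rx offload flag combination listed in
 * SSO_RX_ADPTR_ENQ_FASTPATH_FUNC, R() expands into four variants:
 * single-event, burst, and their timeout counterparts. A pending tag switch
 * (swtag_req) is completed first; otherwise work is pulled from the SSO with
 * ssows_get_work(), and the timeout variants retry up to timeout_ticks times.
 */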
#define R(name, f2, f1, f0, flags)					     \
static uint16_t __rte_noinline	__rte_hot				     \
ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks)  \
{									     \
	struct ssows *ws = port;					     \
									     \
	RTE_SET_USED(timeout_ticks);					     \
									     \
	if (ws->swtag_req) {						     \
		ws->swtag_req = 0;					     \
		ssows_swtag_wait(ws);					     \
		return 1;						     \
	} else {							     \
		return ssows_get_work(ws, ev, flags);			     \
	}								     \
}									     \
									     \
static uint16_t __rte_hot						     \
ssows_deq_burst_ ##name(void *port, struct rte_event ev[],		     \
			 uint16_t nb_events, uint64_t timeout_ticks)	     \
{									     \
	RTE_SET_USED(nb_events);					     \
									     \
	return ssows_deq_ ##name(port, ev, timeout_ticks);		     \
}									     \
									     \
static uint16_t __rte_hot						     \
ssows_deq_timeout_ ##name(void *port, struct rte_event *ev,		     \
			  uint64_t timeout_ticks)			     \
{									     \
	struct ssows *ws = port;					     \
	uint64_t iter;							     \
	uint16_t ret = 1;						     \
									     \
	if (ws->swtag_req) {						     \
		ws->swtag_req = 0;					     \
		ssows_swtag_wait(ws);					     \
	} else {							     \
		ret = ssows_get_work(ws, ev, flags);			     \
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)   \
			ret = ssows_get_work(ws, ev, flags);		     \
	}								     \
	return ret;							     \
}									     \
									     \
static uint16_t __rte_hot						     \
ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],	     \
				uint16_t nb_events, uint64_t timeout_ticks)  \
{									     \
	RTE_SET_USED(nb_events);					     \
									     \
	return ssows_deq_timeout_ ##name(port, ev, timeout_ticks);	     \
}

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R

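/*
 * Generic enqueue entry point. Despite the burst prototype, only ev[0] is
 * consumed: NEW events are injected with ssows_new_event() (preceded by
 * rte_smp_wmb() so prior stores to the event payload are visible), FORWARD
 * events are re-scheduled and RELEASE drops the current tag. Returns 1 on
 * success, 0 for an unknown operation.
 */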
uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	RTE_SET_USED(nb_events);

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_wmb();
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}

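/*
 * Enqueue a burst of RTE_EVENT_OP_NEW events: one store barrier up front,
 * then every event in the burst is added as new work. Always reports the
 * full burst as enqueued.
 */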
uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t i;
	struct ssows *ws = port;

	rte_smp_wmb();
	for (i = 0; i < nb_events; i++)
		ssows_new_event(ws, &ev[i]);

	return nb_events;
}

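/*
 * Enqueue entry point for RTE_EVENT_OP_FORWARD only; again just ev[0] is
 * forwarded and a count of 1 is returned.
 */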
uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	RTE_SET_USED(nb_events);

	ssows_forward_event(ws, ev);

	return 1;
}

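/*
 * Drain all events still queued on the given group. Work is pulled directly
 * through the group's GET_WORK register until both the AQ count and the
 * CQ/DS counts drop to zero; every valid event is converted back to an
 * rte_event (including WQE-to-mbuf translation for ethdev events) and passed
 * to the optional callback.
 */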
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
				ssows_handle_event_t fn, void *arg)
{
	uint32_t reg_off;
	struct rte_event ev;
	uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;
	uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x1FFF1FFF0000;

		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

		sched_type_queue = (get_work0 >> 32) & 0xfff;
		ws->cur_tt = sched_type_queue & 0x3;
		ws->cur_grp = sched_type_queue >> 2;
		sched_type_queue = sched_type_queue << 38;
		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
					(ev.event >> 20) & 0x7F,
					OCCTX_RX_OFFLOAD_NONE |
					OCCTX_RX_MULTI_SEG_F,
					ws->lookup_mem);
		else
			ev.u64 = get_work1;

		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
	}
}

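/*
 * Reset the worker slot to a clean state: if a tag switch is still pending,
 * deschedule the ordered/atomic work it refers to; otherwise simply untag the
 * currently held ordered/atomic context.
 */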
void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}

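/*
 * Common Tx adapter enqueue body. Before transmitting, the worker switches to
 * an atomic tag (the exact switch depends on the current schedule type) and
 * waits for it to complete, presumably to serialise transmission per flow/tag;
 * the mbuf is then sent on the Tx queue recorded by
 * rte_event_eth_tx_adapter_txq_get(). Only one event is handled per call.
 */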
static __rte_always_inline uint16_t
__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
			       uint16_t nb_events, uint64_t *cmd,
			       const uint16_t flag)
{
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mbuf *m;
	struct rte_eth_dev *ethdev;
	struct ssows *ws = port;
	struct octeontx_txq *txq;

	RTE_SET_USED(nb_events);
	switch (ev->sched_type) {
	case SSO_SYNC_ORDERED:
		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_UNTAGGED:
		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
				ev->queue_id);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_ATOMIC:
		rte_io_wmb();
		break;
	}

	m = ev[0].mbuf;
	port_id = m->port;
	queue_id = rte_event_eth_tx_adapter_txq_get(m);
	ethdev = &rte_eth_devices[port_id];
	txq = ethdev->data->tx_queues[queue_id];

	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
}

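/*
 * Tx adapter fast-path template: T() stamps out one enqueue function per Tx
 * offload flag combination in SSO_TX_ADPTR_ENQ_FASTPATH_FUNC, each with a
 * command buffer sized for that combination.
 */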
#define T(name, f3, f2, f1, f0, sz, flags)				     \
static uint16_t __rte_noinline	__rte_hot				     \
sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[],     \
				  uint16_t nb_events)			     \
{									     \
	uint64_t cmd[sz];						     \
	return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd,	     \
					      flags);			     \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

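/*
 * Crypto adapter enqueue: hand ev->event_ptr (the crypto request) straight to
 * otx_crypto_adapter_enqueue(); only a single event is consumed per call.
 */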
static uint16_t __rte_hot
ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
			    uint16_t nb_events)
{
	RTE_SET_USED(nb_events);

	return otx_crypto_adapter_enqueue(port, ev->event_ptr);
}

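/*
 * Populate the eventdev fast-path function pointers. The Tx adapter and
 * dequeue handlers are picked from lookup tables indexed by the negotiated
 * Tx/Rx offload flags, and the timeout-capable dequeue variants are selected
 * when a dequeue timeout was configured (is_timeout_deq).
 */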
void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;

	dev->ca_enqueue = ssow_crypto_adapter_enqueue;

	const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
#define T(name, f3, f2, f1, f0, sz, flags)				\
	[f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,

		SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	dev->txa_enqueue = ssow_txa_enqueue
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];

	dev->txa_enqueue_same_dest = dev->txa_enqueue;

	/* Assigning dequeue func pointers */
	const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags)					\
	[f2][f1][f0] =  ssows_deq_burst_ ##name,

		SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	dev->dequeue_burst = ssow_deq_burst
		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

	if (edev->is_timeout_deq) {
		const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags)					\
	[f2][f1][f0] =  ssows_deq_timeout_burst_ ##name,

			SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
		};

		dev->dequeue_burst = ssow_deq_timeout_burst
			[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
	}
}

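/*
 * Build the errlev/errcode -> Rx ol_flags translation table used by the Rx
 * fast path. Each possible hardware error level/code pair is mapped to the
 * corresponding RTE_MBUF_F_RX_*_CKSUM_GOOD/BAD flags.
 */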
static void
octeontx_create_rx_ol_flags_array(void *mem)
{
	uint16_t idx, errcode, errlev;
	uint32_t val, *ol_flags;

	/* Skip ptype array memory */
	ol_flags = (uint32_t *)mem;

	for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
		errcode = idx & 0xff;
		errlev = (idx & 0x700) >> 8;

		val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
		val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
		val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;

		switch (errlev) {
		case OCCTX_ERRLEV_RE:
			if (errcode) {
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			} else {
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			}
			break;
		case OCCTX_ERRLEV_LC:
			if (errcode == OCCTX_EC_IP4_CSUM) {
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
			} else {
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			}
			break;
		case OCCTX_ERRLEV_LD:
			/* Check if the parsed packet is neither IPv4 nor IPv6 */
			if (errcode == OCCTX_EC_IP4_NOT)
				break;
			val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			if (errcode == OCCTX_EC_L4_CSUM)
				val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			break;
		case OCCTX_ERRLEV_LE:
			if (errcode == OCCTX_EC_IP4_CSUM)
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			break;
		case OCCTX_ERRLEV_LF:
			/* Check if the parsed packet is neither IPv4 nor IPv6 */
			if (errcode == OCCTX_EC_IP4_NOT)
				break;
			val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			if (errcode == OCCTX_EC_L4_CSUM)
				val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			break;
		}

		ol_flags[idx] = val;
	}
}

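/*
 * Return the shared fast-path lookup memzone used for Rx offload flag
 * translation, reserving and initialising it on first use; returns NULL if
 * the reservation fails.
 */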
void *
octeontx_fastpath_lookup_mem_get(void)
{
	const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
	const struct rte_memzone *mz;
	void *mem;

	mz = rte_memzone_lookup(name);
	if (mz != NULL)
		return mz->addr;

	/* Request for the first time */
	mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
					 SOCKET_ID_ANY, 0, OCCTX_ALIGN);
	if (mz != NULL) {
		mem = mz->addr;
		/* Form the rx ol_flags based on errcode */
		octeontx_create_rx_ol_flags_array(mem);
		return mem;
	}
	return NULL;
}