/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"

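/*
 * Inject a new event into the SSO: the flow tag, schedule type and
 * destination group come from the rte_event and the u64 payload is
 * added as the work entry.
 */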
static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

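/*
 * Forward an event within the same group by switching the tag on the
 * current work slot; the switch flavour (full, normal or untag) depends
 * on the current and new schedule types as per the table below.
 */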
static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm             untag
	 * SSO_SYNC_ATOMIC         norm           norm             untag
	 * SSO_SYNC_UNTAGGED       full           full             NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED) {
			ssows_swtag_full(ws, ev->u64, tag,
				new_tt, grp);
		}
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}

#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

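/*
 * Forward an event to a different group: when the current context is
 * ordered, first switch to an atomic tag built from the reserved
 * OCT_EVENT_TYPE_GRP_FWD type and the destination group so ordering is
 * preserved, then add the work to the new group.
 */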
static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

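/*
 * Forward an event: a plain tag switch when the destination group is
 * unchanged, a deschedule/add_work transfer otherwise.
 */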
static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has changed for group based work pipelining,
	 * use the deschedule/add_work operation to transfer the event to
	 * the new group/core
	 */
		ssows_fwd_group(ws, ev, grp);
}

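/* Release the tag currently held by the work slot, if any. */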
static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}

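/*
 * Dequeue a single event. A tag switch requested by an earlier forward
 * is completed first (returning 1 without fetching new work); otherwise
 * the SSO is polled once for work.
 */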
__rte_always_inline uint16_t __rte_hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;

	RTE_SET_USED(timeout_ticks);

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
		return 1;
	} else {
		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
	}
}

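/*
 * Dequeue with a timeout: poll ssows_get_work() up to timeout_ticks
 * times until an event becomes available.
 */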
__rte_always_inline uint16_t __rte_hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;
	uint64_t iter;
	uint16_t ret = 1;

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
	} else {
		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
	}
	return ret;
}

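/*
 * Burst dequeue wrappers: the SSO hands out one event per GET_WORK, so
 * these simply dequeue a single event per call.
 */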
uint16_t __rte_hot
ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq(port, ev, timeout_ticks);
}

uint16_t __rte_hot
ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
			uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_timeout(port, ev, timeout_ticks);
}

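/*
 * Multi-segment variants of the dequeue functions above: identical
 * except that received packets are assembled into chained mbufs via
 * OCCTX_RX_MULTI_SEG_F.
 */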
__rte_always_inline uint16_t __rte_hot
ssows_deq_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;

	RTE_SET_USED(timeout_ticks);

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
		return 1;
	} else {
		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
				      OCCTX_RX_MULTI_SEG_F);
	}
}

__rte_always_inline uint16_t __rte_hot
ssows_deq_timeout_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;
	uint64_t iter;
	uint16_t ret = 1;

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
	} else {
		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
				     OCCTX_RX_MULTI_SEG_F);
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
					     OCCTX_RX_MULTI_SEG_F);
	}
	return ret;
}

uint16_t __rte_hot
ssows_deq_burst_mseg(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_mseg(port, ev, timeout_ticks);
}

uint16_t __rte_hot
ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_timeout_mseg(port, ev, timeout_ticks);
}

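/*
 * Enqueue a single event, dispatching on the operation: NEW injects
 * fresh work, FORWARD re-schedules the current event and RELEASE drops
 * the tag held by the work slot.
 */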
__rte_always_inline uint16_t __rte_hot
ssows_enq(void *port, const struct rte_event *ev)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_wmb();
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}

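/*
 * Burst enqueue variants: the generic and FORWARD bursts process one
 * event per call, while the NEW-only burst pushes all events behind a
 * single write barrier.
 */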
uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return ssows_enq(port, ev);
}

uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t i;
	struct ssows *ws = port;

	rte_smp_wmb();
	for (i = 0; i < nb_events; i++)
		ssows_new_event(ws, &ev[i]);

	return nb_events;
}

uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	RTE_SET_USED(nb_events);

	ssows_forward_event(ws, ev);

	return 1;
}

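/*
 * Drain the given event queue (SSO group): issue grouped GET_WORK
 * requests until both the admission queue count and the CQ/DS counts
 * read as zero, handing each non-empty event to the supplied callback.
 */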
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
				ssows_handle_event_t fn, void *arg)
{
	uint32_t reg_off;
	struct rte_event ev;
	uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;
	uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x1FFF1FFF0000;

		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

		sched_type_queue = (get_work0 >> 32) & 0xfff;
		ws->cur_tt = sched_type_queue & 0x3;
		ws->cur_grp = sched_type_queue >> 2;
		sched_type_queue = sched_type_queue << 38;
		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
					(ev.event >> 20) & 0x7F,
					OCCTX_RX_OFFLOAD_NONE |
					OCCTX_RX_MULTI_SEG_F);
		else
			ev.u64 = get_work1;

		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
	}
}

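/*
 * Bring the work slot back to a clean state: if an ordered/atomic tag
 * switch is still pending, deschedule the work; otherwise release any
 * ordered/atomic tag the slot currently holds.
 */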
void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}

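/*
 * Tx adapter enqueue: make sure the event holds an atomic tag before
 * transmitting (switching and waiting when the event is ordered or
 * untagged), then send the event's mbuf on the Tx queue the adapter
 * recorded in the mbuf.
 */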
static __rte_always_inline uint16_t
__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
			       uint16_t nb_events, const uint16_t flag)
{
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mbuf *m;
	struct rte_eth_dev *ethdev;
	struct ssows *ws = port;
	struct octeontx_txq *txq;
	uint64_t cmd[4];

	RTE_SET_USED(nb_events);
	switch (ev->sched_type) {
	case SSO_SYNC_ORDERED:
		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
		rte_cio_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_UNTAGGED:
		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
				ev->queue_id);
		rte_cio_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_ATOMIC:
		rte_cio_wmb();
		break;
	}

	m = ev[0].mbuf;
	port_id = m->port;
	queue_id = rte_event_eth_tx_adapter_txq_get(m);
	ethdev = &rte_eth_devices[port_id];
	txq = ethdev->data->tx_queues[queue_id];

	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
}

uint16_t
sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
			     uint16_t nb_events)
{
	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
					      OCCTX_TX_OFFLOAD_NONE);
}

uint16_t
sso_event_tx_adapter_enqueue_mseg(void *port, struct rte_event ev[],
				  uint16_t nb_events)
{
	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
					      OCCTX_TX_OFFLOAD_NONE |
					      OCCTX_TX_MULTI_SEG_F);
}
377