/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ssovf_worker.h"

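/*
 * Inject a new event into the SSO: the tag, schedule type and group are
 * taken from the event and queued with an add-work operation.
 */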
static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

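/*
 * Forward an event within the current group by switching the tag. The
 * kind of switch (normal, full or untag) depends on the current and new
 * schedule types, per the table below.
 */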
static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm             untag
	 * SSO_SYNC_ATOMIC         norm           norm             untag
	 * SSO_SYNC_UNTAGGED       full           full             NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED) {
			ssows_swtag_full(ws, ev->u64, tag,
				new_tt, grp);
		}
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}

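/* Reserved event type used to synthesize a unique tag for cross-group forwarding */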
#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

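/*
 * Forward an event to a different group. An ordered context is first
 * switched to atomic on a synthesized unique tag (custom event type plus
 * the new group) so ordering is preserved before the event is re-injected
 * into the new group with an add-work operation.
 */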
static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

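/* Forward an event either within the current group or across groups */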
static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed; use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has changed for group-based work pipelining; use the
	 * deschedule/add_work operation to transfer the event to the
	 * new group/core
	 */
		ssows_fwd_group(ws, ev, grp);
}

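/* Release the ordered/atomic tag held by the current event, if any */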
static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}

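/*
 * Dequeue a single event. If a tag switch from a previous enqueue is
 * still pending, complete it and return the event the core already
 * holds instead of fetching new work.
 */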
__rte_always_inline uint16_t __hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;

	RTE_SET_USED(timeout_ticks);

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
		return 1;
	} else {
		return ssows_get_work(ws, ev);
	}
}

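/*
 * Dequeue with timeout: busy-poll for work, making one get_work attempt
 * per tick for up to timeout_ticks iterations.
 */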
__rte_always_inline uint16_t __hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;
	uint64_t iter;
	uint16_t ret = 1;

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
	} else {
		ret = ssows_get_work(ws, ev);
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
			ret = ssows_get_work(ws, ev);
	}
	return ret;
}

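/*
 * Burst dequeue variants: the SSO work slot delivers one event at a
 * time, so these return at most one event regardless of nb_events.
 */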
uint16_t __hot
ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq(port, ev, timeout_ticks);
}

uint16_t __hot
ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
			uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_timeout(port, ev, timeout_ticks);
}

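/*
 * Enqueue a single event, dispatching on the operation type. Returns 1
 * on success and 0 for an unknown operation.
 */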
__rte_always_inline uint16_t __hot
ssows_enq(void *port, const struct rte_event *ev)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_wmb();
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}

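/* Burst enqueue: the work slot accepts a single event per operation */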
uint16_t __hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return ssows_enq(port, ev);
}

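/*
 * Drain a group: if it is enabled, issue grouped get_work requests in a
 * loop until the group's admission-queue count and the CQ/DS counts read
 * from the INT_CNT register all reach zero. The fetched work is discarded.
 */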
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id)
{
	uint32_t reg_off;
	uint64_t aq_cnt = 1;
	uint64_t cq_ds_cnt = 1;
	uint64_t enable, get_work0, get_work1;
	uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x1FFF1FFF0000;
		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
	}

	RTE_SET_USED(get_work0);
	RTE_SET_USED(get_work1);
}

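/*
 * Reset the work slot: if a tag switch is pending on an ordered/atomic
 * tag, deschedule the held event; otherwise drop any ordered/atomic tag
 * currently held.
 */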
void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}