/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <eventdev_pmd.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include "timvf_evdev.h"

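/*
 * Fast-path helpers for the OCTEONTX TIMvf event timer adapter.
 *
 * Each bucket packs its state into a 64-bit control word (w1): a lock
 * count, a semaphore, the SBT/HBT/BSK status bits, the number of armed
 * entries and the number of free slots remaining in the current chunk.
 * Most state transitions below are therefore a single atomic
 * read-modify-write on that word.
 */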
static inline int16_t
timr_bkt_fetch_rem(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
		TIM_BUCKET_W1_M_CHUNK_REMAINDER;
}

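/*
 * Atomic accessors for the bucket's chunk_remainder field, i.e. the
 * number of free slots left in the current chunk.
 */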
static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
	return rte_atomic_load_explicit(&bktp->chunk_remainder,
			rte_memory_order_acquire);
}

static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	rte_atomic_store_explicit(&bktp->chunk_remainder, v,
			rte_memory_order_release);
}

static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
			rte_memory_order_release);
}

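/*
 * SBT and HBT are the bucket's busy (traversal) status bits; the arm
 * and cancel paths below retry or bail out while either bit is set.
 */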
static inline uint8_t
timr_bkt_get_sbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
}

static inline uint64_t
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
	return rte_atomic_fetch_or_explicit(&bktp->w1, v,
			rte_memory_order_acq_rel);
}

static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
	return rte_atomic_fetch_and_explicit(&bktp->w1, v,
			rte_memory_order_acq_rel);
}

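/* Combined SBT|HBT state; non-zero means the bucket is busy. */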
static inline uint8_t
timr_bkt_get_shbt(uint64_t w1)
{
	return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
		((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
}

static inline uint8_t
timr_bkt_get_hbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
}

static inline uint8_t
timr_bkt_get_bsk(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
}

static inline uint64_t
timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
{
	/* Clear everything in w1 except the lock. */
	const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
	return rte_atomic_fetch_and_explicit(&bktp->w1, v,
			rte_memory_order_acq_rel);
}

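/*
 * Take the bucket semaphore; the WLOCK variant also takes the bucket
 * write lock in the same atomic fetch-add, so the multi-producer arm
 * path gets both in one operation.
 */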
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
	return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
			rte_memory_order_acq_rel);
}

static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
	return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
			rte_memory_order_relaxed);
}

static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
	const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
	return rte_atomic_fetch_add_explicit(&bktp->w1, v,
			rte_memory_order_acq_rel);
}

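/* Adding 0xff to the 8-bit lock count wraps around, decrementing it by one. */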
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
	rte_atomic_fetch_add_explicit(&bktp->lock, 0xff,
			rte_memory_order_acq_rel);
}

static inline uint32_t
timr_bkt_get_nent(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
		TIM_BUCKET_W1_M_NUM_ENTRIES;
}

static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
	rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1,
			rte_memory_order_relaxed);
}

static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
	rte_atomic_fetch_add_explicit(&bktp->nb_entry, v,
			rte_memory_order_relaxed);
}

static inline uint64_t
timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
			TIM_BUCKET_W1_S_NUM_ENTRIES);
	return rte_atomic_fetch_and_explicit(&bktp->w1, v,
			rte_memory_order_acq_rel) & v;
}

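/*
 * Free every chunk in the bucket's list except the first one back to
 * the mempool and return the first chunk for reuse. The slot just past
 * the last entry of each chunk (chunk + nb_chunk_slots) holds the
 * pointer to the next chunk.
 */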
static inline struct tim_mem_entry *
timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
{
	struct tim_mem_entry *chunk;
	struct tim_mem_entry *pnext;

	chunk = (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
	chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;

	while (chunk) {
		pnext = (struct tim_mem_entry *)(uintptr_t)
			((chunk + nb_chunk_slots)->w0);
		rte_mempool_put(timr->chunk_pool, chunk);
		chunk = pnext;
	}
	return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
}

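/*
 * Cancel an armed timer. impl_opaque[0] holds the chunk slot of the
 * entry and impl_opaque[1] the owning bucket, both recorded at arm
 * time by the arm functions below; both are cleared on the way out.
 */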
static inline int
timvf_rem_entry(struct rte_event_timer *tim)
{
	uint64_t lock_sema;
	struct tim_mem_entry *entry;
	struct tim_mem_bucket *bkt;

	if (tim->impl_opaque[1] == 0 || tim->impl_opaque[0] == 0)
		return -ENOENT;

	entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
	if (entry->wqe != tim->ev.u64) {
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}
	bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
	lock_sema = timr_bkt_inc_lock(bkt);
	if (timr_bkt_get_shbt(lock_sema) ||
			!timr_bkt_get_nent(lock_sema)) {
		timr_bkt_dec_lock(bkt);
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}

	entry->w0 = entry->wqe = 0;
	timr_bkt_dec_lock(bkt);

	tim->state = RTE_EVENT_TIMER_CANCELED;
	tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
	return 0;
}

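/*
 * Get a chunk to write the next entry into. If the bucket still has
 * armed entries (or no chunk list at all), a fresh chunk is taken from
 * the mempool and linked after the current one; otherwise the stale
 * chunk list left over from a previous traversal is reclaimed via
 * timr_clr_bkt() and its first chunk is reused.
 */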
static inline struct tim_mem_entry *
timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (bkt->nb_entry || !bkt->first_chunk) {
		if (unlikely(rte_mempool_get(timr->chunk_pool,
				(void **)&chunk))) {
			return NULL;
		}
		if (bkt->nb_entry) {
			*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
					bkt->current_chunk) +
					nb_chunk_slots) = (uintptr_t)chunk;
		} else {
			bkt->first_chunk = (uintptr_t)chunk;
		}
	} else {
		chunk = timr_clr_bkt(timr, bkt);
		bkt->first_chunk = (uintptr_t)chunk;
	}
	*(uint64_t *)(chunk + nb_chunk_slots) = 0;

	return chunk;
}

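/*
 * FPA-backed variant: traversed chunks are returned to the pool by the
 * hardware free-pool allocator, so software always takes a fresh chunk
 * and never reclaims the old list itself.
 */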
static inline struct tim_mem_entry *
timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
		return NULL;

	*(uint64_t *)(chunk + nb_chunk_slots) = 0;
	if (bkt->nb_entry) {
		*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
				bkt->current_chunk) +
				nb_chunk_slots) = (uintptr_t)chunk;
	} else {
		bkt->first_chunk = (uintptr_t)chunk;
	}

	return chunk;
}

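/*
 * Map a relative bucket offset to an absolute bucket: cycles elapsed
 * since ring start are converted to the current bucket index with a
 * precomputed reciprocal divide, rel_bkt is added, and get_target_bkt()
 * reduces the result to the ring size.
 */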
static inline struct tim_mem_bucket *
timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
{
	const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
	const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
			&timr->fast_div) + rel_bkt;
	const uint32_t tbkt_id = timr->get_target_bkt(bucket,
			timr->nb_bkts);
	return &timr->bkt[tbkt_id];
}

/* Single producer functions. */
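/*
 * Arm one timer on a ring with a single producer thread: only the
 * bucket semaphore is taken (no write lock), since no other arm call
 * can race with this one.
 */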
static inline int
timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

	bkt = timvf_get_target_bucket(timr, rel_bkt);
__retry:
	/* Get the bucket semaphore. */
	lock_sema = timr_bkt_fetch_sema(bkt);
	/* Bucket-related checks: wait out a hardware traversal. */
	if (unlikely(timr_bkt_get_hbt(lock_sema)))
		goto __retry;

	/* Insert the work. */
	rem = timr_bkt_fetch_rem(lock_sema);

	if (!rem) {
		/* Current chunk is full; refill. */
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t)chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy the work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}

/* Multi producer functions. */
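/*
 * Arm one timer with multiple producer threads: the bucket write lock
 * is taken together with the semaphore and dropped once the entry is
 * in place.
 */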
static inline int
timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);
	/* Get the bucket semaphore with the write lock. */
	lock_sema = timr_bkt_fetch_sema_lock(bkt);
	/* Bucket-related checks: back off while the bucket is busy. */
	if (unlikely(timr_bkt_get_shbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	rem = timr_bkt_fetch_rem(lock_sema);

	if (rem < 0) {
		/* Another thread owns the refill; drop the lock and retry. */
		timr_bkt_dec_lock(bkt);
		goto __retry;
	} else if (!rem) {
		/* Only one thread can reach here; it owns the refill. */
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			timr_bkt_dec_lock(bkt);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t)chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy the work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);
	timr_bkt_dec_lock(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}

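/*
 * Copy entries ents[index..cpy_lmt) into consecutive chunk slots and
 * record each timer's arm state (chunk slot, bucket, ARMED). Returns
 * the index one past the last entry copied.
 */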
static inline uint16_t
timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
		struct tim_mem_entry *chunk,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry * const ents,
		const struct tim_mem_bucket * const bkt)
{
	for (; index < cpy_lmt; index++) {
		*chunk = *(ents + index);
		tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
		tim[index]->impl_opaque[1] = (uintptr_t)bkt;
		tim[index]->state = RTE_EVENT_TIMER_ARMED;
	}

	return index;
}

/* Burst mode functions. */
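/*
 * Arm a burst of timers into one target bucket: take the bucket lock
 * once, fill the current chunk, spill the rest into a refilled chunk
 * if needed, and release the lock at the end. Returns the number of
 * timers armed; on a failed refill rte_errno is set to ENOMEM.
 */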
static inline int
timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry *ents,
		const uint16_t nb_timers)
{
	int16_t rem;
	int16_t crem;
	uint8_t lock_cnt;
	uint16_t index = 0;
	uint16_t chunk_remainder;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);

	/* Only one thread is allowed beyond this point. */
	lock_sema = timr_bkt_inc_lock(bkt);
	lock_cnt = (uint8_t)
		((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);

	if (lock_cnt) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	/* Bucket-related checks: wait out a hardware traversal. */
	if (unlikely(timr_bkt_get_hbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	chunk_remainder = timr_bkt_fetch_rem(lock_sema);
	rem = chunk_remainder - nb_timers;
	if (rem < 0) {
		/* The burst does not fit in the current chunk. */
		crem = nb_chunk_slots - chunk_remainder;
		if (chunk_remainder && crem) {
			chunk = ((struct tim_mem_entry *)
					(uintptr_t)bkt->current_chunk) + crem;

			index = timvf_cpy_wrk(index, chunk_remainder,
					chunk, tim, ents, bkt);
			timr_bkt_sub_rem(bkt, chunk_remainder);
			timr_bkt_add_nent(bkt, chunk_remainder);
		}
		rem = nb_timers - chunk_remainder;

		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_dec_lock(bkt);
			rte_errno = ENOMEM;
			tim[index]->state = RTE_EVENT_TIMER_ERROR;
			/* Report only the timers armed so far. */
			return index;
		}
		*(uint64_t *)(chunk + nb_chunk_slots) = 0;
		bkt->current_chunk = (uintptr_t)chunk;

		/* timvf_cpy_wrk() resumes at ents[index]. */
		index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
		timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
		timr_bkt_add_nent(bkt, rem);
	} else {
		/* The whole burst fits in the current chunk. */
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += (nb_chunk_slots - chunk_remainder);

		index = timvf_cpy_wrk(index, nb_timers,
				chunk, tim, ents, bkt);
		timr_bkt_sub_rem(bkt, nb_timers);
		timr_bkt_add_nent(bkt, nb_timers);
	}

	timr_bkt_dec_lock(bkt);
	return nb_timers;
}