/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <eventdev_pmd.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include "timvf_evdev.h"

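/*
 * Fast-path helpers for the TIM VF event timer adapter.
 *
 * Most helpers below operate on the per-bucket control word (w1), whose
 * bit layout (chunk remainder, HBT/SBT/BSK flags, lock count and number
 * of entries) is described by the TIM_BUCKET_W1_* shift/mask macros in
 * timvf_evdev.h.
 */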
static inline int16_t
timr_bkt_fetch_rem(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
		TIM_BUCKET_W1_M_CHUNK_REMAINDER;
}

static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
	return rte_atomic_load_explicit(&bktp->chunk_remainder,
			rte_memory_order_acquire);
}

static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	rte_atomic_store_explicit(&bktp->chunk_remainder, v,
			rte_memory_order_release);
}

static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
			rte_memory_order_release);
}

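/*
 * HBT and SBT are single-bit bucket state flags in w1 (see the
 * TIM_BUCKET_W1_* masks); the arm/cancel paths below treat a set HBT or
 * SBT bit as the bucket being busy (presumably with a hardware bucket
 * traversal) and retry or bail out. BSK is a third flag; note that
 * timr_bkt_clr_bsk() resets the whole control word except the lock count.
 */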
static inline uint8_t
timr_bkt_get_sbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
}

static inline uint64_t
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
	return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}

static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
	return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}

static inline uint8_t
timr_bkt_get_shbt(uint64_t w1)
{
	return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
		((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
}

static inline uint8_t
timr_bkt_get_hbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
}

static inline uint8_t
timr_bkt_get_bsk(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
}

static inline uint64_t
timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
{
	/* Clear everything except the lock. */
	const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
	return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}

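/*
 * The "sema" helpers reserve a slot in the bucket with a single
 * fetch-and-add on w1: timr_bkt_fetch_sema() adds TIM_BUCKET_SEMA
 * (single-producer path, no lock taken), while timr_bkt_fetch_sema_lock()
 * adds TIM_BUCKET_SEMA_WLOCK, which additionally bumps the lock count
 * (multi-producer path). The returned value is the pre-add w1, from which
 * the callers read the chunk remainder and the busy flags.
 */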
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
	return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
			rte_memory_order_acq_rel);
}

static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
	return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
			rte_memory_order_relaxed);
}

static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
	const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
	return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}

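/*
 * Adding 0xff to the one-byte lock counter wraps it around, which is
 * equivalent to decrementing the lock count by one.
 */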
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
	rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
}

static inline uint32_t
timr_bkt_get_nent(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
		TIM_BUCKET_W1_M_NUM_ENTRIES;
}

static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
	rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
}

static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
	rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
}

static inline uint64_t
timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
			TIM_BUCKET_W1_S_NUM_ENTRIES);
	return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
}

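/*
 * Release every chunk in the bucket's chain back to the mempool except
 * the first one, and return that first chunk so it can be reused. The
 * last slot of each chunk (at offset nb_chunk_slots) holds the pointer
 * to the next chunk.
 */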
static inline struct tim_mem_entry *
timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
{
	struct tim_mem_entry *chunk;
	struct tim_mem_entry *pnext;
	chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
	chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;

	while (chunk) {
		pnext = (struct tim_mem_entry *)(uintptr_t)
			((chunk + nb_chunk_slots)->w0);
		rte_mempool_put(timr->chunk_pool, chunk);
		chunk = pnext;
	}
	return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
}

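/*
 * Cancel an armed timer. The chunk-entry and bucket handles stashed in
 * impl_opaque[] when the timer was armed are validated first; the bucket
 * lock is then taken and the entry cleared, unless the bucket is busy
 * (HBT/SBT set) or already empty, in which case -ENOENT is returned.
 */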
static inline int
timvf_rem_entry(struct rte_event_timer *tim)
{
	uint64_t lock_sema;
	struct tim_mem_entry *entry;
	struct tim_mem_bucket *bkt;
	if (tim->impl_opaque[1] == 0 ||
			tim->impl_opaque[0] == 0)
		return -ENOENT;

	entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
	if (entry->wqe != tim->ev.u64) {
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}
	bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
	lock_sema = timr_bkt_inc_lock(bkt);
	if (timr_bkt_get_shbt(lock_sema)
			|| !timr_bkt_get_nent(lock_sema)) {
		timr_bkt_dec_lock(bkt);
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}

	entry->w0 = entry->wqe = 0;
	timr_bkt_dec_lock(bkt);

	tim->state = RTE_EVENT_TIMER_CANCELED;
	tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
	return 0;
}

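/*
 * Return the chunk that the next entry should be written into. If the
 * bucket still holds entries (or has no chunk at all), a fresh chunk is
 * taken from the pool and linked after the current one; otherwise the
 * bucket's existing chain is recycled via timr_clr_bkt(). The next-chunk
 * pointer of the returned chunk is always reset to 0.
 */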
static inline struct tim_mem_entry *
timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (bkt->nb_entry || !bkt->first_chunk) {
		if (unlikely(rte_mempool_get(timr->chunk_pool,
						(void **)&chunk))) {
			return NULL;
		}
		if (bkt->nb_entry) {
			*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
					bkt->current_chunk) +
					nb_chunk_slots) =
				(uintptr_t) chunk;
		} else {
			bkt->first_chunk = (uintptr_t) chunk;
		}
	} else {
		chunk = timr_clr_bkt(timr, bkt);
		bkt->first_chunk = (uintptr_t)chunk;
	}
	*(uint64_t *)(chunk + nb_chunk_slots) = 0;

	return chunk;
}

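/*
 * FPA-backed refill variant: always take a fresh chunk from the pool and
 * link it into the bucket, with no software recycling of the old chain
 * (presumably the hardware-managed pool reclaims consumed chunks).
 */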
static inline struct tim_mem_entry *
timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
		return NULL;

	*(uint64_t *)(chunk + nb_chunk_slots) = 0;
	if (bkt->nb_entry) {
		*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
				bkt->current_chunk) +
				nb_chunk_slots) =
			(uintptr_t) chunk;
	} else {
		bkt->first_chunk = (uintptr_t) chunk;
	}

	return chunk;
}

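/*
 * Map a relative bucket offset onto a bucket pointer: the index of the
 * current bucket is derived from the cycles elapsed since ring start
 * (scaled by the precomputed reciprocal divisor), rel_bkt is added, and
 * the result is wrapped to the ring size by get_target_bkt().
 */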
static inline struct tim_mem_bucket *
timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
{
	const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
	const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
			&timr->fast_div) + rel_bkt;
	const uint32_t tbkt_id = timr->get_target_bkt(bucket,
			timr->nb_bkts);
	return &timr->bkt[tbkt_id];
}

/* Single producer functions. */
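/*
 * Arm a single timer when only one lcore produces into this ring: the
 * bucket slot is reserved with a lock-less fetch-and-add of the bucket
 * semaphore and the arm is retried while the bucket is busy (HBT set).
 */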
static inline int
timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

	bkt = timvf_get_target_bucket(timr, rel_bkt);
__retry:
	/* Get the bucket semaphore. */
	lock_sema = timr_bkt_fetch_sema(bkt);
	/* Bucket related checks. */
	if (unlikely(timr_bkt_get_hbt(lock_sema)))
		goto __retry;

	/* Insert the work. */
	rem = timr_bkt_fetch_rem(lock_sema);

	if (!rem) {
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t) chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}

/* Multi producer functions. */
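/*
 * Multi-producer variant of the arm path: the bucket slot is reserved
 * with timr_bkt_fetch_sema_lock(), which also takes the bucket lock, so
 * concurrent producers serialize on the lock count and back off (retry on
 * a new target bucket) when the bucket is busy or the chunk remainder has
 * gone negative.
 */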
static inline int
timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);
	/* Bucket related checks. */
	/* Get the bucket semaphore. */
	lock_sema = timr_bkt_fetch_sema_lock(bkt);
	if (unlikely(timr_bkt_get_shbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	rem = timr_bkt_fetch_rem(lock_sema);

	if (rem < 0) {
		/* Go to a different bucket. */
		timr_bkt_dec_lock(bkt);
		goto __retry;
	} else if (!rem) {
		/* Only one thread can be here. */
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			timr_bkt_dec_lock(bkt);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t) chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);
	timr_bkt_dec_lock(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}

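/*
 * Copy entries [index, cpy_lmt) into consecutive slots of the given
 * chunk, record the chunk/bucket handles in each timer's impl_opaque[]
 * and mark the timers armed; returns the next index to copy.
 */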
static inline uint16_t
timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
		struct tim_mem_entry *chunk,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry * const ents,
		const struct tim_mem_bucket * const bkt)
{
	for (; index < cpy_lmt; index++) {
		*chunk = *(ents + index);
		tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
		tim[index]->impl_opaque[1] = (uintptr_t)bkt;
		tim[index]->state = RTE_EVENT_TIMER_ARMED;
	}

	return index;
}

/* Burst mode functions */
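/*
 * Arm nb_timers timers into the same target bucket while holding the
 * bucket lock exclusively. When the current chunk cannot hold the whole
 * burst, the first part fills the remaining slots and the rest goes into
 * a freshly refilled chunk; on refill failure rte_errno is set to ENOMEM,
 * the first unarmed timer is flagged RTE_EVENT_TIMER_ERROR and the
 * function returns early. On success it returns nb_timers.
 */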
static inline int
timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry *ents,
		const uint16_t nb_timers)
{
	int16_t rem;
	int16_t crem;
	uint8_t lock_cnt;
	uint16_t index = 0;
	uint16_t chunk_remainder;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);

	/* Only one thread beyond this. */
	lock_sema = timr_bkt_inc_lock(bkt);
	lock_cnt = (uint8_t)
		((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);

	if (lock_cnt) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	/* Bucket related checks. */
	if (unlikely(timr_bkt_get_hbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	chunk_remainder = timr_bkt_fetch_rem(lock_sema);
	rem = chunk_remainder - nb_timers;
	if (rem < 0) {
		crem = nb_chunk_slots - chunk_remainder;
		if (chunk_remainder && crem) {
			chunk = ((struct tim_mem_entry *)
					(uintptr_t)bkt->current_chunk) + crem;

			index = timvf_cpy_wrk(index, chunk_remainder,
					chunk, tim, ents, bkt);
			timr_bkt_sub_rem(bkt, chunk_remainder);
			timr_bkt_add_nent(bkt, chunk_remainder);
		}
		rem = nb_timers - chunk_remainder;
		ents = ents + chunk_remainder;

		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_dec_lock(bkt);
			rte_errno = ENOMEM;
			tim[index]->state = RTE_EVENT_TIMER_ERROR;
			return crem;
		}
		*(uint64_t *)(chunk + nb_chunk_slots) = 0;
		bkt->current_chunk = (uintptr_t) chunk;

		index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
		timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
		timr_bkt_add_nent(bkt, rem);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += (nb_chunk_slots - chunk_remainder);

		index = timvf_cpy_wrk(index, nb_timers,
				chunk, tim, ents, bkt);
		timr_bkt_sub_rem(bkt, nb_timers);
		timr_bkt_add_nent(bkt, nb_timers);
	}

	timr_bkt_dec_lock(bkt);
	return nb_timers;
}