/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal_memconfig.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_spinlock.h>
#include <rte_random.h>
#include <rte_pause.h>
#include <rte_memzone.h>

#include "rte_timer.h"

/**
 * Per-lcore info for timers.
 */
struct __rte_cache_aligned priv_timer {
	struct rte_timer pending_head;  /**< dummy timer instance to head up list */
	rte_spinlock_t list_lock;       /**< lock to protect list access */

	/** per-core variable that is true if a timer was updated on this
	 *  core since the last reset of the variable */
	int updated;

	/** track the current depth of the skiplist */
	unsigned curr_skiplist_depth;

	unsigned prev_lcore;              /**< used for lcore round robin */

	/** running timer on this lcore now */
	struct rte_timer *running_tim;

#ifdef RTE_LIBRTE_TIMER_DEBUG
	/** per-lcore statistics */
	struct rte_timer_debug_stats stats;
#endif
};

#define FL_ALLOCATED	(1 << 0)
struct rte_timer_data {
	struct priv_timer priv_timer[RTE_MAX_LCORE];
	uint8_t internal_flags;
};

#define RTE_MAX_DATA_ELS 64
static const struct rte_memzone *rte_timer_data_mz;
static int *volatile rte_timer_mz_refcnt;
static struct rte_timer_data *rte_timer_data_arr;
static const uint32_t default_data_id;
static uint32_t rte_timer_subsystem_initialized;

/* when debug is enabled, store some statistics */
#ifdef RTE_LIBRTE_TIMER_DEBUG
#define __TIMER_STAT_ADD(priv_timer, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();			\
		if (__lcore_id < RTE_MAX_LCORE)				\
			priv_timer[__lcore_id].stats.name += (n);	\
	} while(0)
#else
#define __TIMER_STAT_ADD(priv_timer, name, n) do {} while (0)
#endif

static inline int
timer_data_valid(uint32_t id)
{
	return rte_timer_data_arr &&
		(rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
}

/* validate ID and retrieve timer data pointer, or return error value */
#define TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, retval) do {	\
	if (id >= RTE_MAX_DATA_ELS || !timer_data_valid(id))		\
		return retval;						\
	timer_data = &rte_timer_data_arr[id];				\
} while (0)

int
rte_timer_data_alloc(uint32_t *id_ptr)
{
	int i;
	struct rte_timer_data *data;

	if (!rte_timer_subsystem_initialized)
		return -ENOMEM;

	for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
		data = &rte_timer_data_arr[i];
		if (!(data->internal_flags & FL_ALLOCATED)) {
			data->internal_flags |= FL_ALLOCATED;

			if (id_ptr)
				*id_ptr = i;

			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_timer_data_dealloc(uint32_t id)
{
	struct rte_timer_data *timer_data;
	TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, -EINVAL);

	timer_data->internal_flags &= ~(FL_ALLOCATED);

	return 0;
}
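
/*
 * Illustrative sketch, not part of the library: how an application might
 * obtain and later release a private timer-data instance for use with the
 * rte_timer_alt_*() calls. Error handling is reduced to the minimum and the
 * timer handle and callback names are hypothetical.
 *
 *	uint32_t timer_data_id;
 *
 *	if (rte_timer_data_alloc(&timer_data_id) < 0)
 *		rte_exit(EXIT_FAILURE, "no free timer data element\n");
 *	rte_timer_alt_reset(timer_data_id, &tim, ticks, SINGLE,
 *			    rte_lcore_id(), my_cb, NULL);
 *	...
 *	rte_timer_data_dealloc(timer_data_id);
 */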

/* Init the timer library. Allocate an array of timer data structs in shared
 * memory, and allocate the zeroth entry for use with original timer
 * APIs. Since the intersection of the sets of lcore ids in primary and
 * secondary processes should be empty, the zeroth entry can be shared by
 * multiple processes.
 */
int
rte_timer_subsystem_init(void)
{
	const struct rte_memzone *mz;
	struct rte_timer_data *data;
	int i, lcore_id;
	static const char *mz_name = "rte_timer_mz";
	const size_t data_arr_size =
			RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr);
	const size_t mem_size = data_arr_size + sizeof(*rte_timer_mz_refcnt);
	bool do_full_init = true;

	rte_mcfg_timer_lock();

	if (rte_timer_subsystem_initialized) {
		rte_mcfg_timer_unlock();
		return -EALREADY;
	}

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
				SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			rte_mcfg_timer_unlock();
			return -ENOMEM;
		}
		do_full_init = true;
	} else
		do_full_init = false;

	rte_timer_data_mz = mz;
	rte_timer_data_arr = mz->addr;
	rte_timer_mz_refcnt = (void *)((char *)mz->addr + data_arr_size);

	if (do_full_init) {
		for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
			data = &rte_timer_data_arr[i];

			for (lcore_id = 0; lcore_id < RTE_MAX_LCORE;
			     lcore_id++) {
				rte_spinlock_init(
					&data->priv_timer[lcore_id].list_lock);
				data->priv_timer[lcore_id].prev_lcore =
					lcore_id;
			}
		}
	}

	rte_timer_data_arr[default_data_id].internal_flags |= FL_ALLOCATED;
	(*rte_timer_mz_refcnt)++;

	rte_timer_subsystem_initialized = 1;

	rte_mcfg_timer_unlock();

	return 0;
}

void
rte_timer_subsystem_finalize(void)
{
	rte_mcfg_timer_lock();

	if (!rte_timer_subsystem_initialized) {
		rte_mcfg_timer_unlock();
		return;
	}

	if (--(*rte_timer_mz_refcnt) == 0)
		rte_memzone_free(rte_timer_data_mz);

	rte_timer_subsystem_initialized = 0;

	rte_mcfg_timer_unlock();
}
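
/*
 * Illustrative sketch, not part of the library: the expected subsystem
 * lifecycle in an application, after rte_eal_init() has completed.
 *
 *	int ret = rte_timer_subsystem_init();
 *	if (ret < 0 && ret != -EALREADY)
 *		rte_exit(EXIT_FAILURE, "cannot init timer subsystem\n");
 *	... arm timers and call rte_timer_manage() on the worker lcores ...
 *	rte_timer_subsystem_finalize();
 */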

/* Initialize the timer handle tim for use */
void
rte_timer_init(struct rte_timer *tim)
{
	union rte_timer_status status;

	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_relaxed);
}

/*
 * if timer is pending or stopped (or running on the same core as
 * us), mark timer as configuring, and on success return the previous
 * status of the timer
 */
static int
timer_set_config_state(struct rte_timer *tim,
		       union rte_timer_status *ret_prev_status,
		       struct priv_timer *priv_timer)
{
	union rte_timer_status prev_status, status;
	int success = 0;
	unsigned lcore_id;

	lcore_id = rte_lcore_id();

	/* wait until the timer is in the correct status before updating,
	 * and mark it as being configured */
	prev_status.u32 = rte_atomic_load_explicit(&tim->status.u32, rte_memory_order_relaxed);

	while (success == 0) {
		/* timer is running on another core
		 * or ready to run on local core, exit
		 */
		if (prev_status.state == RTE_TIMER_RUNNING &&
		    (prev_status.owner != (uint16_t)lcore_id ||
		     tim != priv_timer[lcore_id].running_tim))
			return -1;

		/* timer is being configured on another core */
		if (prev_status.state == RTE_TIMER_CONFIG)
			return -1;

		/* here, we know that timer is stopped or pending,
		 * mark it atomically as being configured */
		status.state = RTE_TIMER_CONFIG;
		status.owner = (int16_t)lcore_id;
		/* CONFIG states are acting as locked states. If the
		 * timer is in CONFIG state, the state cannot be changed
		 * by other threads. So, we should use ACQUIRE here.
		 */
		success = rte_atomic_compare_exchange_strong_explicit(&tim->status.u32,
					      (uint32_t *)(uintptr_t)&prev_status.u32,
					      status.u32,
					      rte_memory_order_acquire,
					      rte_memory_order_relaxed);
	}

	ret_prev_status->u32 = prev_status.u32;
	return 0;
}

/*
 * if timer is pending, mark timer as running
 */
static int
timer_set_running_state(struct rte_timer *tim)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int success = 0;

	/* wait until the timer is in the correct status before updating,
	 * and mark it as running */
	prev_status.u32 = rte_atomic_load_explicit(&tim->status.u32, rte_memory_order_relaxed);

	while (success == 0) {
		/* timer is not pending anymore */
		if (prev_status.state != RTE_TIMER_PENDING)
			return -1;

		/* we know that the timer is pending at this point,
		 * mark it atomically as being running
		 */
		status.state = RTE_TIMER_RUNNING;
		status.owner = (int16_t)lcore_id;
		/* RUNNING states are acting as locked states. If the
		 * timer is in RUNNING state, the state cannot be changed
		 * by other threads. So, we should use ACQUIRE here.
		 */
		success = rte_atomic_compare_exchange_strong_explicit(&tim->status.u32,
					      (uint32_t *)(uintptr_t)&prev_status.u32,
					      status.u32,
					      rte_memory_order_acquire,
					      rte_memory_order_relaxed);
	}

	return 0;
}

/*
 * Return a skiplist level for a new entry.
 * This probabilistically gives a level with p=1/4 that an entry at level n
 * will also appear at level n+1.
 */
static uint32_t
timer_get_skiplist_level(unsigned curr_depth)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	static uint32_t i, count = 0;
	static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
#endif

	/* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
	 * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
	 * bit position of a (pseudo)random number.
	 */
	uint32_t rand = rte_rand() & (UINT32_MAX - 1);
	uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;

	/* limit the levels used to one above our current level, so we don't,
	 * for instance, have a level 0 and a level 7 without anything between
	 */
	if (level > curr_depth)
		level = curr_depth;
	if (level >= MAX_SKIPLIST_DEPTH)
		level = MAX_SKIPLIST_DEPTH-1;
#ifdef RTE_LIBRTE_TIMER_DEBUG
	count ++;
	levels[level]++;
	if (count % 10000 == 0)
		for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
			printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
#endif
	return level;
}
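
/*
 * Worked example for the level selection above (illustration only): bit 0
 * of the random value is always cleared, so the lowest set bit b is in
 * [1, 31] and level = (b - 1) / 2. b = 1 or 2 gives level 0 (probability
 * 3/4), b = 3 or 4 gives level 1 (3/16), b = 5 or 6 gives level 2 (3/64),
 * and so on: each level is 1/4 as likely as the one below it, matching the
 * p = 1/4 promotion probability described above.
 */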

/*
 * For a given time value, get the entries at each level which
 * are <= that time value.
 */
static void
timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
		       struct rte_timer **prev, struct priv_timer *priv_timer)
{
	unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
	prev[lvl] = &priv_timer[tim_lcore].pending_head;
	while(lvl != 0) {
		lvl--;
		prev[lvl] = prev[lvl+1];
		while (prev[lvl]->sl_next[lvl] &&
				prev[lvl]->sl_next[lvl]->expire <= time_val)
			prev[lvl] = prev[lvl]->sl_next[lvl];
	}
}

/*
 * Given a timer node in the skiplist, find the previous entries for it at
 * all skiplist levels.
 */
static void
timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
				struct rte_timer **prev,
				struct priv_timer *priv_timer)
{
	int i;

	/* to get a specific entry in the list, look for just lower than the time
	 * values, and then increment on each level individually if necessary
	 */
	timer_get_prev_entries(tim->expire - 1, tim_lcore, prev, priv_timer);
	for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
		while (prev[i]->sl_next[i] != NULL &&
				prev[i]->sl_next[i] != tim &&
				prev[i]->sl_next[i]->expire <= tim->expire)
			prev[i] = prev[i]->sl_next[i];
	}
}

/* call with lock held as necessary
 * add in list
 * timer must be in config state
 * timer must not be in a list
 */
static void
timer_add(struct rte_timer *tim, unsigned int tim_lcore,
	  struct priv_timer *priv_timer)
{
	unsigned lvl;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* find where exactly this element goes in the list of elements
	 * for each depth. */
	timer_get_prev_entries(tim->expire, tim_lcore, prev, priv_timer);

	/* now assign it a new level and add at that level */
	const unsigned tim_level = timer_get_skiplist_level(
			priv_timer[tim_lcore].curr_skiplist_depth);
	if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
		priv_timer[tim_lcore].curr_skiplist_depth++;

	lvl = tim_level;
	while (lvl > 0) {
		tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
		prev[lvl]->sl_next[lvl] = tim;
		lvl--;
	}
	tim->sl_next[0] = prev[0]->sl_next[0];
	prev[0]->sl_next[0] = tim;

	/* save the lowest list entry into the expire field of the dummy hdr
	 * NOTE: this is not atomic on 32-bit*/
	priv_timer[tim_lcore].pending_head.expire = priv_timer[tim_lcore].\
			pending_head.sl_next[0]->expire;
}

/*
 * del from list, lock if needed
 * timer must be in config state
 * timer must be in a list
 */
static void
timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
	  int local_is_locked, struct priv_timer *priv_timer)
{
	unsigned lcore_id = rte_lcore_id();
	unsigned prev_owner = prev_status.owner;
	int i;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* if timer is pending on another core, we need to lock the
	 * list; if it is on local core, we need to lock if we are not
	 * called from rte_timer_manage() */
	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[prev_owner].list_lock);

	/* save the lowest list entry into the expire field of the dummy hdr.
	 * NOTE: this is not atomic on 32-bit */
	if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
		priv_timer[prev_owner].pending_head.expire =
				((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);

	/* adjust pointers from previous entries to point past this */
	timer_get_prev_entries_for_node(tim, prev_owner, prev, priv_timer);
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
		if (prev[i]->sl_next[i] == tim)
			prev[i]->sl_next[i] = tim->sl_next[i];
	}

	/* in case we deleted last entry at a level, adjust down max level */
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
			priv_timer[prev_owner].curr_skiplist_depth --;
		else
			break;

	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
}

/* Reset and start the timer associated with the timer handle (private func) */
static int
__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
		  uint64_t period, unsigned tim_lcore,
		  rte_timer_cb_t fct, void *arg,
		  int local_is_locked,
		  struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	int ret;
	unsigned lcore_id = rte_lcore_id();
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* round robin for tim_lcore */
	if (tim_lcore == (unsigned)LCORE_ID_ANY) {
		if (lcore_id < RTE_MAX_LCORE) {
			/* EAL thread with valid lcore_id */
			tim_lcore = rte_get_next_lcore(
				priv_timer[lcore_id].prev_lcore,
				0, 1);
			priv_timer[lcore_id].prev_lcore = tim_lcore;
		} else
			/* non-EAL threads do not run rte_timer_manage(),
			 * so schedule the timer on the first enabled lcore. */
			tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
	}

	/* wait until the timer is in the correct status before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, reset, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, local_is_locked, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	tim->period = period;
	tim->expire = expire;
	tim->f = fct;
	tim->arg = arg;

	/* if timer needs to be scheduled on another core, we need to
	 * lock the destination list; if it is on local core, we need to lock if
	 * we are not called from rte_timer_manage()
	 */
	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);

	__TIMER_STAT_ADD(priv_timer, pending, 1);
	timer_add(tim, tim_lcore, priv_timer);

	/* update state: as we are in CONFIG state, only we can modify
	 * the state, so we don't need a compare-exchange here */
	status.state = RTE_TIMER_PENDING;
	status.owner = (int16_t)tim_lcore;
	/* The "RELEASE" ordering guarantees the memory operations above
	 * the status update are observed before the update by all threads
	 */
	rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_release);

	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);

	return 0;
}

/* Reset and start the timer associated with the timer handle tim */
int
rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
		      enum rte_timer_type type, unsigned int tim_lcore,
		      rte_timer_cb_t fct, void *arg)
{
	return rte_timer_alt_reset(default_data_id, tim, ticks, type,
				   tim_lcore, fct, arg);
}

int
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
		    uint64_t ticks, enum rte_timer_type type,
		    unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
{
	uint64_t cur_time = rte_get_timer_cycles();
	uint64_t period;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	if (type == PERIODICAL)
		period = ticks;
	else
		period = 0;

	return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
				 fct, arg, 0, timer_data);
}

/* loop until rte_timer_reset() succeeds */
void
rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
		     enum rte_timer_type type, unsigned tim_lcore,
		     rte_timer_cb_t fct, void *arg)
{
	while (rte_timer_reset(tim, ticks, type, tim_lcore,
			       fct, arg) != 0)
		rte_pause();
}
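
/*
 * Illustrative usage sketch, not part of the library: arming a periodic
 * timer that fires roughly once per second on the calling lcore. The
 * callback name and its empty body are hypothetical.
 *
 *	static void my_cb(struct rte_timer *tim, void *arg)
 *	{
 *		// PERIODICAL timers are reloaded automatically
 *	}
 *
 *	struct rte_timer tim;
 *
 *	rte_timer_init(&tim);
 *	rte_timer_reset_sync(&tim, rte_get_timer_hz(), PERIODICAL,
 *			     rte_lcore_id(), my_cb, NULL);
 */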

static int
__rte_timer_stop(struct rte_timer *tim,
		 struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* wait until the timer is in the correct status before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, stop, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, 0, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	/* mark timer as stopped */
	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	/* The "RELEASE" ordering guarantees the memory operations above
	 * the status update are observed before the update by all threads
	 */
	rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_release);

	return 0;
}

/* Stop the timer associated with the timer handle tim */
int
rte_timer_stop(struct rte_timer *tim)
{
	return rte_timer_alt_stop(default_data_id, tim);
}

int
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	return __rte_timer_stop(tim, timer_data);
}

/* loop until rte_timer_stop() succeeds */
void
rte_timer_stop_sync(struct rte_timer *tim)
{
	while (rte_timer_stop(tim) != 0)
		rte_pause();
}

/* Test the PENDING status of the timer handle tim */
int
rte_timer_pending(struct rte_timer *tim)
{
	return rte_atomic_load_explicit(&tim->status.state,
				rte_memory_order_relaxed) == RTE_TIMER_PENDING;
}

/* must be called periodically; run all timers that have expired */
static void
__rte_timer_manage(struct rte_timer_data *timer_data)
{
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim;
	struct rte_timer *run_first_tim, **pprev;
	unsigned lcore_id = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(lcore_id < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(priv_timer, manage, 1);
	/* optimize for the case where per-cpu list is empty */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
		return;
	cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
	/* on 64-bit the value cached in pending_head.expire will be
	 * updated atomically, so we can consult that for a quick check here
	 * outside the lock */
	if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
		return;
#endif

	/* browse ordered list, add expired timers in 'expired' list */
	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);

	/* if nothing to do just unlock and return */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
	    priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		return;
	}

	/* save start of list of expired timers */
	tim = priv_timer[lcore_id].pending_head.sl_next[0];

	/* break the existing list at current time point */
	timer_get_prev_entries(cur_time, lcore_id, prev, priv_timer);
	for (i = priv_timer[lcore_id].curr_skiplist_depth -1; i >= 0; i--) {
		if (prev[i] == &priv_timer[lcore_id].pending_head)
			continue;
		priv_timer[lcore_id].pending_head.sl_next[i] =
		    prev[i]->sl_next[i];
		if (prev[i]->sl_next[i] == NULL)
			priv_timer[lcore_id].curr_skiplist_depth--;
		prev[i]->sl_next[i] = NULL;
	}

	/* transition run-list from PENDING to RUNNING */
	run_first_tim = tim;
	pprev = &run_first_tim;

	for ( ; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];

		ret = timer_set_running_state(tim);
		if (likely(ret == 0)) {
			pprev = &tim->sl_next[0];
		} else {
			/* another core is trying to re-config this one,
			 * remove it from local expired list
			 */
			*pprev = next_tim;
		}
	}

	/* update the next to expire timer value */
	priv_timer[lcore_id].pending_head.expire =
	    (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
		priv_timer[lcore_id].pending_head.sl_next[0]->expire;

	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	/* now scan expired list and call callbacks */
	for (tim = run_first_tim; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];
		priv_timer[lcore_id].updated = 0;
		priv_timer[lcore_id].running_tim = tim;

		/* execute callback function with list unlocked */
		tim->f(tim, tim->arg);

		__TIMER_STAT_ADD(priv_timer, pending, -1);
		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here */
		if (priv_timer[lcore_id].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			rte_atomic_store_explicit(&tim->status.u32, status.u32,
				rte_memory_order_release);
		}
		else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(priv_timer, pending, 1);
			status.owner = (int16_t)lcore_id;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			rte_atomic_store_explicit(&tim->status.u32, status.u32,
				rte_memory_order_release);
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, lcore_id, tim->f, tim->arg, 1,
				timer_data);
			rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		}
	}
	priv_timer[lcore_id].running_tim = NULL;
}

int
rte_timer_manage(void)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	__rte_timer_manage(timer_data);

	return 0;
}
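
/*
 * Illustrative sketch, not part of the library: a typical lcore main loop
 * that services expired timers. rte_timer_manage() only scans the pending
 * list of the calling lcore, so every lcore that owns timers must call it
 * itself. The 'quit' flag is hypothetical.
 *
 *	while (!quit) {
 *		// ... application work ...
 *		rte_timer_manage();
 *	}
 *
 * Calling it on every iteration is cheap when nothing has expired, thanks
 * to the pending_head.expire fast path above.
 */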

int
rte_timer_alt_manage(uint32_t timer_data_id,
		     unsigned int *poll_lcores,
		     int nb_poll_lcores,
		     rte_timer_alt_manage_cb_t f)
{
	unsigned int default_poll_lcores[] = {rte_lcore_id()};
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim, **pprev;
	struct rte_timer *run_first_tims[RTE_MAX_LCORE];
	unsigned int this_lcore = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, j, ret;
	int nb_runlists = 0;
	struct rte_timer_data *data;
	struct priv_timer *privp;
	uint32_t poll_lcore;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, data, -EINVAL);

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(this_lcore < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(data->priv_timer, manage, 1);

	if (poll_lcores == NULL) {
		poll_lcores = default_poll_lcores;
		nb_poll_lcores = RTE_DIM(default_poll_lcores);
	}

	for (i = 0; i < nb_poll_lcores; i++) {
		poll_lcore = poll_lcores[i];
		privp = &data->priv_timer[poll_lcore];

		/* optimize for the case where per-cpu list is empty */
		if (privp->pending_head.sl_next[0] == NULL)
			continue;
		cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
		/* on 64-bit the value cached in pending_head.expire will
		 * be updated atomically, so we can consult that for a quick
		 * check here outside the lock
		 */
		if (likely(privp->pending_head.expire > cur_time))
			continue;
#endif

		/* browse ordered list, add expired timers in 'expired' list */
		rte_spinlock_lock(&privp->list_lock);

		/* if nothing to do just unlock and continue */
		if (privp->pending_head.sl_next[0] == NULL ||
		    privp->pending_head.sl_next[0]->expire > cur_time) {
			rte_spinlock_unlock(&privp->list_lock);
			continue;
		}

		/* save start of list of expired timers */
		tim = privp->pending_head.sl_next[0];

		/* break the existing list at current time point */
		timer_get_prev_entries(cur_time, poll_lcore, prev,
				       data->priv_timer);
		for (j = privp->curr_skiplist_depth - 1; j >= 0; j--) {
			if (prev[j] == &privp->pending_head)
				continue;
			privp->pending_head.sl_next[j] =
				prev[j]->sl_next[j];
			if (prev[j]->sl_next[j] == NULL)
				privp->curr_skiplist_depth--;

			prev[j]->sl_next[j] = NULL;
		}

		/* transition run-list from PENDING to RUNNING */
		run_first_tims[nb_runlists] = tim;
		pprev = &run_first_tims[nb_runlists];
		nb_runlists++;

		for ( ; tim != NULL; tim = next_tim) {
			next_tim = tim->sl_next[0];

			ret = timer_set_running_state(tim);
			if (likely(ret == 0)) {
				pprev = &tim->sl_next[0];
			} else {
				/* another core is trying to re-config this one,
				 * remove it from local expired list
				 */
				*pprev = next_tim;
			}
		}

		/* update the next to expire timer value */
		privp->pending_head.expire =
		    (privp->pending_head.sl_next[0] == NULL) ? 0 :
			privp->pending_head.sl_next[0]->expire;

		rte_spinlock_unlock(&privp->list_lock);
	}

	/* Now process the run lists */
	while (1) {
		bool done = true;
		uint64_t min_expire = UINT64_MAX;
		int min_idx = 0;

		/* Find the next oldest timer to process */
		for (i = 0; i < nb_runlists; i++) {
			tim = run_first_tims[i];

			if (tim != NULL && tim->expire < min_expire) {
				min_expire = tim->expire;
				min_idx = i;
				done = false;
			}
		}

		if (done)
			break;

		tim = run_first_tims[min_idx];

		/* Move down the runlist from which we picked a timer to
		 * execute
		 */
		run_first_tims[min_idx] = run_first_tims[min_idx]->sl_next[0];

		data->priv_timer[this_lcore].updated = 0;
		data->priv_timer[this_lcore].running_tim = tim;

		/* Call the provided callback function */
		f(tim);

		__TIMER_STAT_ADD(data->priv_timer, pending, -1);

		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here
		 */
		if (data->priv_timer[this_lcore].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			rte_atomic_store_explicit(&tim->status.u32, status.u32,
				rte_memory_order_release);
		} else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(
				&data->priv_timer[this_lcore].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(data->priv_timer, pending, 1);
			status.owner = (int16_t)this_lcore;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			rte_atomic_store_explicit(&tim->status.u32, status.u32,
				rte_memory_order_release);
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, this_lcore, tim->f, tim->arg, 1,
				data);
			rte_spinlock_unlock(
				&data->priv_timer[this_lcore].list_lock);
		}

		data->priv_timer[this_lcore].running_tim = NULL;
	}

	return 0;
}
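
/*
 * Illustrative sketch, not part of the library: rte_timer_alt_manage() lets
 * one lcore drain the pending lists of several lcores and hands each expired
 * timer to the supplied callback instead of invoking tim->f directly. A
 * minimal callback preserving the classic behaviour, with a hypothetical
 * lcore set and timer_data_id:
 *
 *	static void manage_cb(struct rte_timer *tim)
 *	{
 *		tim->f(tim, tim->arg);
 *	}
 *
 *	unsigned int poll_lcores[] = {1, 2, 3};
 *
 *	rte_timer_alt_manage(timer_data_id, poll_lcores,
 *			     RTE_DIM(poll_lcores), manage_cb);
 */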

/* Walk pending lists, stopping timers and calling user-specified function */
int
rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
		   int nb_walk_lcores,
		   rte_timer_stop_all_cb_t f, void *f_arg)
{
	int i;
	struct priv_timer *priv_timer;
	uint32_t walk_lcore;
	struct rte_timer *tim, *next_tim;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	for (i = 0; i < nb_walk_lcores; i++) {
		walk_lcore = walk_lcores[i];
		priv_timer = &timer_data->priv_timer[walk_lcore];

		for (tim = priv_timer->pending_head.sl_next[0];
		     tim != NULL;
		     tim = next_tim) {
			next_tim = tim->sl_next[0];

			__rte_timer_stop(tim, timer_data);

			if (f)
				f(tim, f_arg);
		}
	}

	return 0;
}

int64_t
rte_timer_next_ticks(void)
{
	unsigned int lcore_id = rte_lcore_id();
	struct rte_timer_data *timer_data;
	struct priv_timer *priv_timer;
	const struct rte_timer *tm;
	uint64_t cur_time;
	int64_t left = -ENOENT;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	priv_timer = timer_data->priv_timer;
	cur_time = rte_get_timer_cycles();

	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
	tm = priv_timer[lcore_id].pending_head.sl_next[0];
	if (tm) {
		left = tm->expire - cur_time;
		if (left < 0)
			left = 0;
	}
	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	return left;
}
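
/*
 * Illustrative sketch, not part of the library: using the value above to
 * avoid busy-polling when the next expiry is far away. The conversion to a
 * sleep interval is an application choice and is only hinted at here.
 *
 *	int64_t ticks = rte_timer_next_ticks();
 *
 *	if (ticks == 0)
 *		rte_timer_manage();
 *	else if (ticks > 0)
 *		; // e.g. sleep for up to ticks / rte_get_timer_hz() seconds
 *	else
 *		; // error or no timer pending on this lcore
 */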

/* dump statistics about timers */
static void
__rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	struct rte_timer_debug_stats sum;
	unsigned lcore_id;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	memset(&sum, 0, sizeof(sum));
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.reset += priv_timer[lcore_id].stats.reset;
		sum.stop += priv_timer[lcore_id].stats.stop;
		sum.manage += priv_timer[lcore_id].stats.manage;
		sum.pending += priv_timer[lcore_id].stats.pending;
	}
	fprintf(f, "Timer statistics:\n");
	fprintf(f, "  reset = %"PRIu64"\n", sum.reset);
	fprintf(f, "  stop = %"PRIu64"\n", sum.stop);
	fprintf(f, "  manage = %"PRIu64"\n", sum.manage);
	fprintf(f, "  pending = %"PRIu64"\n", sum.pending);
#else
	fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
#endif
}

int
rte_timer_dump_stats(FILE *f)
{
	return rte_timer_alt_dump_stats(default_data_id, f);
}

int
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	__rte_timer_dump_stats(timer_data, f);

	return 0;
}