/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
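/* Each adapter's shared data lives in a memzone whose name embeds the
 * adapter ID; this is what allows rte_event_timer_adapter_lookup() to
 * re-attach to an existing instance (e.g. from a secondary process).
 */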

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer.buf, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

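/* Note that EVTIM_LOG() appends a newline itself, so format strings passed
 * by callers should not include a trailing "\n".
 */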
#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static inline enum rte_timer_type
get_timer_type(const struct rte_event_timer_adapter *adapter)
{
	return (adapter->data->conf.flags &
			RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) ?
			PERIODICAL : SINGLE;
}

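/* Default port configuration callback: stop the event device if it was
 * running, reconfigure it with one additional event port reserved for the
 * adapter, set that port up, and restart the device if needed.
 */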
static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, port_id,
						      port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}
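
/* A minimal application-side usage sketch (not part of this library).  It
 * assumes event device 0 has already been configured; the field values are
 * illustrative only:
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 1000000,	// 1 ms resolution
 *		.max_tmo_ns = 10000000000ULL,	// 10 s maximum timeout
 *		.nb_timers = 1024,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_event_timer_adapter *adap =
 *		rte_event_timer_adapter_create(&conf);
 *
 *	if (adap != NULL && rte_event_timer_adapter_start(adap) == 0) {
 *		// arm event timers with rte_event_timer_arm_burst()
 *	}
 */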

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
		conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = EINVAL;
			return NULL;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int i, ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	ret = 0;
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			ret = adapters[i].allocated;

	if (!ret) {
		rte_free(adapters);
		adapters = NULL;
	}

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->data->service_inited && service_id != NULL)
		*service_id = adapter->data->service_id;

	return adapter->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
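/* For example, with head == 4097, head & EVENT_BUFFER_MASK == 1, which is
 * equivalent to head % EVENT_BUFFER_SZ but cheaper to compute.
 */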

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;
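/* head and tail are free-running counters; they are masked down to an array
 * index only on access, and the buffer is full when head - tail reaches
 * EVENT_BUFFER_SZ.
 */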

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to the
	 * event device.
	 */
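	/* For example, with EVENT_BUFFER_SZ == 4096: if tail_idx == 4090 and
	 * head_idx == 10, the events have wrapped, so only the run from index
	 * 4090 to the end of the array (6 events) is enqueued now; the
	 * remainder is picked up on a subsequent flush.
	 */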
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by the adapter instance; it may have been
	 * adjusted from what the user requested.
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		uint16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;
	enum rte_timer_type type;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);
	type = get_timer_type(adapter);

	if (unlikely(sw->in_use[lcore].v == 0)) {
		sw->in_use[lcore].v = 1;
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					     __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
				__ATOMIC_RELAXED);
	}

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		if (type == SINGLE) {
			/* If event buffer is full, put timer back in list with
			 * immediate expiry value, so that we process it again
			 * on the next iteration.
			 */
			ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0,
						SINGLE, lcore, NULL, evtim);
			if (ret < 0) {
				EVTIM_LOG_DBG("event buffer full, failed to "
						"reset timer with immediate "
						"expiry value");
			} else {
				sw->stats.evtim_retry_count++;
				EVTIM_LOG_DBG("event buffer full, resetting "
						"rte_timer with immediate "
						"expiry value");
			}
		} else {
			sw->stats.evtim_drop_count++;
		}

	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* If the buffer of expired timers is full, return its contents
		 * to the mempool now, so that only timers that expired earlier
		 * are freed.
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		/* Don't free rte_timer for a periodic event timer until
		 * it is cancelled
		 */
		if (type == SINGLE)
			sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		if (type == SINGLE)
			__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				__ATOMIC_RELEASE);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
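	/* For example, with timer_tick_ns == 1000000 (1 ms), timeout_ticks ==
	 * 100 and a 2 GHz timer clock (rte_get_timer_hz() == 2e9), timeout_ns
	 * is 1e8 and the result is 2e8 timer cycles.
	 */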
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */
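	/* For example, with cycles_per_adapter_tick == 1000, start_cycles ==
	 * 2500 and *next_tick_cyclesp == 2000: the tick has passed, so
	 * start_cycles is snapped back to 2000 and the next tick is scheduled
	 * for cycle 3000.
	 */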

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct swtim *sw = swtim_pmd_priv(adapter);

	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
	if (tmo_nsec > sw->max_tmo_ns)
		return -1;
	if (tmo_nsec < sw->timer_tick_ns)
		return -2;

	return 0;
}

/* Check that the event timer's sched type matches the sched type of the
 * destination event queue.
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	const uint64_t prior_enq_count = sw->stats.ev_enq_count;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	rte_event_maintain(adapter->data->event_dev_id,
			   adapter->data->event_port_id, 0);

	return prior_enq_count == sw->stats.ev_enq_count ? -EAGAIN : 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where
 * a user can't arm the number of timers the adapter was configured with
 * because mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to find the largest cache size we can use.
 */
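/* For example (assuming RTE_MAX_LCORE == 128), with nb_requested == 10000
 * the adapter rounds up to nb_actual == 16384, leaving 6384 spare objects;
 * the loop below settles on a cache size of 32, since 64 would need
 * 128 * 64 == 8192 objects, more than the spare capacity.
 */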
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0 && ret != -EALREADY) {
		EVTIM_LOG_ERR("failed to initialize timer subsystem");
		rte_errno = -ret;
		goto free_mempool;
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);
	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_timer_data_dealloc(sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to deallocate timer data instance");
		return ret;
	}

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;
	enum rte_timer_type type = SINGLE;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if non-EAL thread; arbitrarily pick the timer list
	 * of the highest lcore to insert such timers into.
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap operation can prevent the race condition
	 * on in_use flag between multiple non-EAL threads.
	 */
	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
			&exp_state, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					     __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
				__ATOMIC_RELAXED);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* update timer type for periodic adapter */
	type = get_timer_type(adapter);

	for (i = 0; i < nb_evtims; i++) {
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (unlikely(ret == -1)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		cycles = get_timeout_cycles(evtims[i], adapter);
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  type, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELEASE);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* The RELEASE ordering guarantees that the adapter-specific
		 * data written above is observed before the state update.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				__ATOMIC_RELEASE);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		/* The ACQUIRE ordering guarantees that the timer's
		 * implementation-specific opaque data is read only under the
		 * correct state.
		 */
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void *)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to make sure the state update is observed between
		 * threads.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				__ATOMIC_RELEASE);
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
};

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	/* The adapter array is allocated lazily; guard against telemetry
	 * queries issued before any adapter has been created.
	 */
	if (adapters == NULL)
		return -ENODEV;

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_u64(d, "min_resolution_ns", adapter_info.min_resolution_ns);
	rte_tel_data_add_dict_u64(d, "max_tmo_ns", adapter_info.max_tmo_ns);
	rte_tel_data_add_dict_u64(d, "event_dev_id", adapter_info.conf.event_dev_id);
	rte_tel_data_add_dict_u64(d, "socket_id", adapter_info.conf.socket_id);
	rte_tel_data_add_dict_u64(d, "clk_src", adapter_info.conf.clk_src);
	rte_tel_data_add_dict_u64(d, "timer_tick_ns", adapter_info.conf.timer_tick_ns);
	rte_tel_data_add_dict_u64(d, "nb_timers", adapter_info.conf.nb_timers);
	rte_tel_data_add_dict_u64(d, "flags", adapter_info.conf.flags);

	return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_stats stats;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	/* See the matching guard in handle_ta_info(). */
	if (adapters == NULL)
		return -ENODEV;

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_stats_get(adapter, &stats);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_u64(d, "evtim_exp_count", stats.evtim_exp_count);
	rte_tel_data_add_dict_u64(d, "ev_enq_count", stats.ev_enq_count);
	rte_tel_data_add_dict_u64(d, "ev_inv_count", stats.ev_inv_count);
	rte_tel_data_add_dict_u64(d, "evtim_retry_count", stats.evtim_retry_count);
	rte_tel_data_add_dict_u64(d, "adapter_tick_count", stats.adapter_tick_count);

	return 0;
}

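/* Example query via usertools/dpdk-telemetry.py, where the parameter after
 * the comma is the adapter ID:
 *
 *	--> /eventdev/ta_stats,0
 */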
RTE_INIT(ta_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/ta_info",
		handle_ta_info,
		"Returns Timer adapter info. Parameter: Timer adapter id");

	rte_telemetry_register_cmd("/eventdev/ta_stats",
		handle_ta_stats,
		"Returns Timer adapter stats. Parameter: Timer adapter id");
}