xref: /dpdk/lib/eventdev/rte_event_timer_adapter.c (revision 8f1d23ece06adff5eae9f1b4365bdbbd3abee2b2)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

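/* Default port configuration callback: event ports can only be added to an
 * event device via a full reconfiguration, so stop the device if it is
 * running, reconfigure it with one additional port, set up that port, and
 * restart the device before handing the new port id back to the adapter.
 */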
static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, port_id,
						      port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}
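
/* Example (illustrative sketch): creating an adapter with the default port
 * configuration callback.  The field values below are assumptions chosen
 * for demonstration, not requirements.
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 10 * 1000 * 1000,	(10 ms tick)
 *		.max_tmo_ns = 180 * (uint64_t)1000000000, (3 min max timeout)
 *		.nb_timers = 1000,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adapter =
 *		rte_event_timer_adapter_create(&conf);
 *	if (adapter == NULL)
 *		rte_exit(EXIT_FAILURE, "create failed: %d\n", rte_errno);
 */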

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
		conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int i, ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	ret = 0;
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			ret = adapters[i].allocated;

	if (!ret) {
		rte_free(adapters);
		adapters = NULL;
	}

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->data->service_inited && service_id != NULL)
		*service_id = adapter->data->service_id;

	return adapter->data->service_inited ? 0 : -ESRCH;
}
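
/* Example (illustrative sketch): the software adapter runs as a service, so
 * before starting it the application maps that service to exactly one
 * service core (see swtim_start() below).  Core id 1 here is an assumption
 * chosen for demonstration.
 *
 *	uint32_t service_id;
 *	if (rte_event_timer_adapter_service_id_get(adapter, &service_id) == 0) {
 *		rte_service_lcore_add(1);
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_lcore_start(1);
 *	}
 *	rte_event_timer_adapter_start(adapter);
 */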

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;
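
/* head and tail are free-running counters; an index is recovered by masking
 * with EVENT_BUFFER_MASK, and (head - tail) is the current occupancy even
 * after the counters wrap.  Worked example with EVENT_BUFFER_SZ = 4096:
 * head = 4100 and tail = 4095 means 5 events are buffered, the next slot to
 * fill is events[4100 & 4095] = events[4], and the next to drain is
 * events[4095].
 */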

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to the
	 * event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
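
/* Worked example of the contiguous-run logic above: with head = 4100 and
 * tail = 4095, head_idx = 4 and tail_idx = 4095, so head_idx < tail_idx and
 * only the single event at the end of the array (n = 4096 - 4095 = 1) is
 * enqueued on this call; the wrapped events at the front of the array are
 * flushed on a later call.  n is also capped at EVENT_BUFFER_BATCHSZ.
 */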

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by the adapter instance. May have been
	 * adjusted from what the user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by the adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		uint16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		/* If event buffer is full, put timer back in list with
		 * immediate expiry value, so that we process it again on the
		 * next iteration.
		 */
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
					  lcore, NULL, evtim);
		if (ret < 0) {
			EVTIM_LOG_DBG("event buffer full, failed to reset "
				      "timer with immediate expiry value");
		} else {
			sw->stats.evtim_retry_count++;
			EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
				      "with immediate expiry value");
		}

		if (unlikely(sw->in_use[lcore].v == 0)) {
			sw->in_use[lcore].v = 1;
			n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
						     __ATOMIC_RELAXED);
			__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
					__ATOMIC_RELAXED);
		}
	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				__ATOMIC_RELEASE);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}
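
/* Worked example for the conversion above, assuming a 2 GHz timer clock:
 * with timer_tick_ns = 10000000 (a 10 ms tick) and timeout_ticks = 100,
 * timeout_ns = 1e9, and the result is 1e9 * 2e9 / 1e9 = 2e9 cycles, i.e.
 * one second.  Note that NSECPERSEC is the floating-point constant 1E9, so
 * the final division is performed in double precision before being
 * converted back to uint64_t.
 */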

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}
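
/* Worked example for the snap logic above: with cycles_per_adapter_tick =
 * 1000 and next_tick_cycles = 5000, a call at start_cycles = 7350 snaps to
 * 7000, sets next_tick_cycles = 8000, and returns true; a following call at
 * 7900 returns false because no new tick boundary has been crossed.
 */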

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct swtim *sw = swtim_pmd_priv(adapter);

	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
	if (tmo_nsec > sw->max_tmo_ns)
		return -1;
	if (tmo_nsec < sw->timer_tick_ns)
		return -2;

	return 0;
}

/* Check that the event timer's sched type matches the sched type of the
 * destination event queue.
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

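	/* Note: -EOVERFLOW indicates the queue was configured with
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES and thus accepts any sched type.
	 */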
	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	rte_event_maintain(adapter->data->event_dev_id,
			   adapter->data->event_port_id, 0);

	return 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}
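
/* Worked example, assuming RTE_MAX_LCORE = 128 and
 * RTE_MEMPOOL_CACHE_MAX_SIZE = 512: with nb_requested = 10000 the pool is
 * rounded up to nb_actual = 16384, leaving 6384 spare objects.  Sizes 1
 * through 32 satisfy 128 * size < 6384, but size = 64 does not
 * (8192 >= 6384), so the loop breaks and the per-lcore cache size is 32.
 */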

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list
	 * of the highest lcore to insert such timers into.
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap operation prevents a race on the in_use
	 * flag between multiple non-EAL threads.
	 */
	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
			&exp_state, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					     __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
				__ATOMIC_RELAXED);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	for (i = 0; i < nb_evtims; i++) {
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (unlikely(ret == -1)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		cycles = get_timeout_cycles(evtims[i], adapter);
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  SINGLE, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELEASE);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees that the adapter-specific value
		 * changes are observed before the state update.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				__ATOMIC_RELEASE);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}
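
/* Example (illustrative sketch): arming a single timer through the public
 * API, which dispatches to the arm_burst op installed below.  The queue id,
 * the 10-tick timeout, and the app_cookie / handle_error names are
 * assumptions chosen for demonstration.
 *
 *	struct rte_event_timer evtim = {
 *		.ev.op = RTE_EVENT_OP_NEW,
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.event_type = RTE_EVENT_TYPE_TIMER,
 *		.ev.event_ptr = app_cookie,
 *		.state = RTE_EVENT_TIMER_NOT_ARMED,
 *		.timeout_ticks = 10,
 *	};
 *	struct rte_event_timer *evtims[] = { &evtim };
 *	if (rte_event_timer_arm_burst(adapter, evtims, 1) != 1)
 *		handle_error(rte_errno);
 */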

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases.
		 * ACQUIRE ordering guarantees that the implementation-specific
		 * opaque data is accessed under the correct state.
		 */
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to make sure the state update is observed between
		 * threads.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				__ATOMIC_RELEASE);
	}

	return i;
}
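
/* Example (illustrative sketch): canceling a previously armed timer through
 * the public API, which dispatches to the cancel_burst op installed below.
 * An EAGAIN result means the timer is running or being configured
 * concurrently, so the cancel can be retried; retry_or_treat_as_expired()
 * is a hypothetical handler.
 *
 *	struct rte_event_timer *evtims[] = { &evtim };
 *	if (rte_event_timer_cancel_burst(adapter, evtims, 1) != 1 &&
 *	    rte_errno == EAGAIN)
 *		retry_or_treat_as_expired();
 */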

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
};

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_u64(d, "min_resolution_ns", adapter_info.min_resolution_ns);
	rte_tel_data_add_dict_u64(d, "max_tmo_ns", adapter_info.max_tmo_ns);
	rte_tel_data_add_dict_u64(d, "event_dev_id", adapter_info.conf.event_dev_id);
	rte_tel_data_add_dict_u64(d, "socket_id", adapter_info.conf.socket_id);
	rte_tel_data_add_dict_u64(d, "clk_src", adapter_info.conf.clk_src);
	rte_tel_data_add_dict_u64(d, "timer_tick_ns", adapter_info.conf.timer_tick_ns);
	rte_tel_data_add_dict_u64(d, "nb_timers", adapter_info.conf.nb_timers);
	rte_tel_data_add_dict_u64(d, "flags", adapter_info.conf.flags);

	return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_stats stats;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_stats_get(adapter, &stats);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_u64(d, "evtim_exp_count", stats.evtim_exp_count);
	rte_tel_data_add_dict_u64(d, "ev_enq_count", stats.ev_enq_count);
	rte_tel_data_add_dict_u64(d, "ev_inv_count", stats.ev_inv_count);
	rte_tel_data_add_dict_u64(d, "evtim_retry_count", stats.evtim_retry_count);
	rte_tel_data_add_dict_u64(d, "adapter_tick_count", stats.adapter_tick_count);

	return 0;
}

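/* Example (illustrative): once registered, these endpoints can be queried
 * with the standard telemetry client, passing the adapter id (0 here, as an
 * assumption) as the parameter:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /eventdev/ta_stats,0
 *	{"/eventdev/ta_stats": {"timer_adapter_id": 0, "evtim_exp_count": ...}}
 */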
RTE_INIT(ta_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/ta_info",
		handle_ta_info,
		"Returns Timer adapter info. Parameter: Timer adapter id");

	rte_telemetry_register_cmd("/eventdev/ta_stats",
		handle_ta_stats,
		"Returns Timer adapter stats. Parameter: Timer adapter id");
}