/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>
#include <rte_reciprocal.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
#define RTE_LOGTYPE_EVTIM evtim_logtype
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer.buf, NOTICE);
#define RTE_LOGTYPE_EVTIM_BUF evtim_buffer_logtype
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
#define RTE_LOGTYPE_EVTIM_SVC evtim_svc_logtype

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	RTE_LOG_LINE_PREFIX(level, logtype, \
		"EVTIMER: %s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, EVTIM, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, EVTIM, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, EVTIM_BUF, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, EVTIM_SVC, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static inline enum rte_timer_type
get_timer_type(const struct rte_event_timer_adapter *adapter)
{
	return (adapter->data->conf.flags &
			RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) ?
			PERIODICAL : SINGLE;
}

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, (port_id - 1),
						      port_conf);
		if (ret < 0)
			return ret;
	}

	dev_conf.nb_event_ports += 1;
	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
		dev_conf.nb_single_link_event_port_queues += 1;

	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}
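
/* Illustrative usage sketch, not part of the library: creating an adapter
 * with the default port configuration callback above.  The eventdev id and
 * the timing values below are assumptions for the example.
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 100000000,	// 100 ms resolution
 *		.max_tmo_ns = 180000000000,	// 3 minute max timeout
 *		.nb_timers = 4096,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adptr =
 *		rte_event_timer_adapter_create(&conf);
 *	if (adptr == NULL)
 *		rte_panic("failed to create adapter: %d\n", rte_errno);
 */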

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
		conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	rte_eventdev_trace_timer_adapter_get_info(adapter, adapter_info);

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = EINVAL;
			return NULL;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_lookup(adapter_id, adapter);

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int i, ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	ret = 0;
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			ret = adapters[i].allocated;

	if (!ret) {
		rte_free(adapters);
		adapters = NULL;
	}

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (service_id == NULL)
		return -EINVAL;

	if (adapter->data->service_inited)
		*service_id = adapter->data->service_id;

	rte_eventdev_trace_timer_adapter_service_id_get(adapter, *service_id);

	return adapter->data->service_inited ? 0 : -ESRCH;
}
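
/* Illustrative sketch: the software implementation below requires exactly one
 * service core to be mapped to its service before the adapter is started (see
 * swtim_start()).  The service lcore id used here is an assumption.
 *
 *	uint32_t svc_id;
 *	const unsigned int slcore = 2;	// hypothetical service lcore
 *
 *	if (rte_event_timer_adapter_service_id_get(adptr, &svc_id) == 0) {
 *		rte_service_lcore_add(slcore);
 *		rte_service_map_lcore_set(svc_id, slcore, 1);
 *		rte_service_runstate_set(svc_id, 1);
 *		rte_service_lcore_start(slcore);
 *	}
 */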
456 
457 int
458 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
459 				  struct rte_event_timer_adapter_stats *stats)
460 {
461 	rte_eventdev_trace_timer_adapter_stats_get(adapter, stats);
462 
463 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
464 	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
465 	if (stats == NULL)
466 		return -EINVAL;
467 
468 	return adapter->ops->stats_get(adapter, stats);
469 }
470 
471 int
472 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
473 {
474 	rte_eventdev_trace_timer_adapter_stats_reset(adapter);
475 
476 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
477 	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
478 	return adapter->ops->stats_reset(adapter);
479 }
480 
481 int
482 rte_event_timer_remaining_ticks_get(
483 			const struct rte_event_timer_adapter *adapter,
484 			const struct rte_event_timer *evtim,
485 			uint64_t *ticks_remaining)
486 {
487 	rte_eventdev_trace_timer_remaining_ticks_get(adapter, evtim, ticks_remaining);
488 
489 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
490 	FUNC_PTR_OR_ERR_RET(adapter->ops->remaining_ticks_get, -ENOTSUP);
491 
492 	if (ticks_remaining == NULL)
493 		return -EINVAL;
494 
495 	return adapter->ops->remaining_ticks_get(adapter, evtim,
496 						 ticks_remaining);
497 }
498 
499 /*
500  * Software event timer adapter buffer helper functions
501  */
502 
503 #define NSECPERSEC 1E9
504 
505 /* Optimizations used to index into the buffer require that the buffer size
506  * be a power of 2.
507  */
508 #define EVENT_BUFFER_SZ 4096
509 #define EVENT_BUFFER_BATCHSZ 32
510 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
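
/* Since EVENT_BUFFER_SZ is a power of 2, masking with EVENT_BUFFER_MASK is
 * equivalent to a modulus, and head/tail can increase (and eventually wrap)
 * freely, e.g.:
 *
 *	size_t idx = head & EVENT_BUFFER_MASK;	// same as head % EVENT_BUFFER_SZ
 *
 * Under unsigned arithmetic, head - tail remains the buffer fill level even
 * across wraparound.
 */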

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to the
	 * event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
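
/* Worked example of the run computation above, with EVENT_BUFFER_SZ = 4096:
 * head = 4100 and tail = 4094 give head_idx = 4 and tail_idx = 4094.  Since
 * head_idx < tail_idx, only the n = 4096 - 4094 = 2 events at the end of the
 * array are contiguous; the 4 events that wrapped to the front of the array
 * are flushed by a subsequent call.
 */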

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by the adapter instance; may have been
	 * adjusted from what the user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		RTE_ATOMIC(uint16_t) v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	RTE_ATOMIC(unsigned int) poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	RTE_ATOMIC(int) n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;
	enum rte_timer_type type;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);
	type = get_timer_type(adapter);

	if (unlikely(sw->in_use[lcore].v == 0)) {
		sw->in_use[lcore].v = 1;
		n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
					     rte_memory_order_relaxed);
		rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore,
				rte_memory_order_relaxed);
	}

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		if (type == SINGLE) {
			/* If event buffer is full, put timer back in list with
			 * immediate expiry value, so that we process it again
			 * on the next iteration.
			 */
			ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0,
						SINGLE, lcore, NULL, evtim);
			if (ret < 0) {
				EVTIM_LOG_DBG("event buffer full, failed to "
						"reset timer with immediate "
						"expiry value");
			} else {
				sw->stats.evtim_retry_count++;
				EVTIM_LOG_DBG("event buffer full, resetting "
						"rte_timer with immediate "
						"expiry value");
			}
		} else {
			sw->stats.evtim_drop_count++;
		}

	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the expired-timer buffer here, if necessary, so that
		 * only timers that have already expired are freed
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		/* Don't free rte_timer for a periodic event timer until
		 * it is cancelled
		 */
		if (type == SINGLE)
			sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		if (type == SINGLE)
			rte_atomic_store_explicit(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				rte_memory_order_release);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline int
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter,
		   uint64_t *timeout_cycles)
{
	static struct rte_reciprocal_u64 nsecpersec_inverse;
	static uint64_t timer_hz;
	uint64_t rem_cycles, secs_cycles = 0;
	uint64_t secs, timeout_nsecs;
	uint64_t nsecpersec;
	struct swtim *sw;

	sw = swtim_pmd_priv(adapter);
	nsecpersec = (uint64_t)NSECPERSEC;

	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
	if (timeout_nsecs > sw->max_tmo_ns)
		return -1;
	if (timeout_nsecs < sw->timer_tick_ns)
		return -2;

	/* Set these values in the first invocation */
	if (!timer_hz) {
		timer_hz = rte_get_timer_hz();
		nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
	}

	/* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
	 * of whole seconds it contains and convert that value to a number
	 * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
	 * in order to avoid overflow when we later multiply by timer_hz.
	 */
	if (timeout_nsecs > nsecpersec) {
		secs = rte_reciprocal_divide_u64(timeout_nsecs,
						 &nsecpersec_inverse);
		secs_cycles = secs * timer_hz;
		timeout_nsecs -= secs * nsecpersec;
	}

	rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
					       &nsecpersec_inverse);

	*timeout_cycles = secs_cycles + rem_cycles;

	return 0;
}
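
/* Worked example for the conversion above, assuming a hypothetical 2 GHz
 * timer (timer_hz = 2e9) and a timeout of 2.5 seconds (timeout_nsecs =
 * 2.5e9): secs = 2 gives secs_cycles = 4e9, and the remaining 0.5e9 ns yield
 * rem_cycles = (0.5e9 * 2e9) / 1e9 = 1e9, i.e. 5e9 cycles in total.  Without
 * splitting out whole seconds first, timeout_nsecs * timer_hz would overflow
 * 64 bits for timeouts beyond roughly 9 seconds at this clock rate.
 */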

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below
	 * executes and starts the tick schedule.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}
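
/* Example of the arithmetic above, assuming a hypothetical 2 GHz timer and a
 * 100 ms adapter tick: cycles_per_adapter_tick = 1e8 * (2e9 / 1e9) = 2e8
 * cycles, and next_tick_cycles always lands on a multiple of that value, so
 * several missed ticks still result in a single adapter tick.
 */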

/* Check that the event timer's sched type matches the sched type of its
 * destination event queue.
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	const uint64_t prior_enq_count = sw->stats.ev_enq_count;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     (unsigned int *)(uintptr_t)sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		sw->stats.adapter_tick_count++;
	}

	event_buffer_flush(&sw->buffer,
			   adapter->data->event_dev_id,
			   adapter->data->event_port_id,
			   &nb_evs_flushed,
			   &nb_evs_invalid);

	sw->stats.ev_enq_count += nb_evs_flushed;
	sw->stats.ev_inv_count += nb_evs_invalid;

	rte_event_maintain(adapter->data->event_dev_id,
			   adapter->data->event_port_id, 0);

	return prior_enq_count == sw->stats.ev_enq_count ? -EAGAIN : 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where
 * a user can't arm the number of timers the adapter was configured with,
 * because mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to find the largest cache size we can use.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}
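
/* Worked example: nb_requested = 3000 rounds up to nb_actual = 4096, leaving
 * 1096 objects of slack.  Assuming RTE_MAX_LCORE = 128, size = 8 passes the
 * first test (128 * 8 = 1024 < 1096) but size = 16 does not, so a per-lcore
 * cache of 8 objects is used.
 */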

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	sw->adapter = adapter;

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter, to avoid leaking memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   (unsigned int *)(uintptr_t)sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_timer_data_dealloc(sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to deallocate timer data instance");
		return ret;
	}

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified to spread the lcores it polls
	 * across multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

static int
swtim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
			  const struct rte_event_timer *evtim,
			  uint64_t *ticks_remaining)
{
	uint64_t nsecs_per_adapter_tick, opaque, cycles_remaining;
	enum rte_event_timer_state n_state;
	double nsecs_per_cycle;
	struct rte_timer *tim;
	uint64_t cur_cycles;

	/* Check that timer is armed */
	n_state = rte_atomic_load_explicit(&evtim->state, rte_memory_order_acquire);
	if (n_state != RTE_EVENT_TIMER_ARMED)
		return -EINVAL;

	opaque = evtim->impl_opaque[0];
	tim = (struct rte_timer *)(uintptr_t)opaque;

	cur_cycles = rte_get_timer_cycles();
	if (cur_cycles > tim->expire) {
		*ticks_remaining = 0;
		return 0;
	}

	cycles_remaining = tim->expire - cur_cycles;
	nsecs_per_cycle = (double)NSECPERSEC / rte_get_timer_hz();
	nsecs_per_adapter_tick = adapter->data->conf.timer_tick_ns;

	*ticks_remaining = (uint64_t)ceil((cycles_remaining * nsecs_per_cycle) /
					  nsecs_per_adapter_tick);

	return 0;
}
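
/* Worked example for the conversion above, assuming a hypothetical 2 GHz
 * timer (0.5 ns per cycle) and a 100 ms adapter tick: cycles_remaining = 3e8
 * corresponds to 1.5e8 ns, and ceil(1.5e8 / 1e8) = 2 adapter ticks remaining.
 */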

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;
	enum rte_timer_type type = SINGLE;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* If this is a non-EAL thread, adjust lcore_id: arbitrarily use the
	 * timer list of the highest lcore to hold such timers
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap prevents a race on the in_use flag
	 * between multiple non-EAL threads.
	 */
	if (unlikely(rte_atomic_compare_exchange_strong_explicit(&sw->in_use[lcore_id].v,
			&exp_state, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
					     rte_memory_order_relaxed);
		rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore_id,
				rte_memory_order_relaxed);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* update timer type for periodic adapter */
	type = get_timer_type(adapter);

	for (i = 0; i < nb_evtims; i++) {
		n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					rte_memory_order_relaxed);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
		if (unlikely(ret == -1)) {
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					rte_memory_order_relaxed);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					rte_memory_order_relaxed);
			rte_errno = EINVAL;
			break;
		}

		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  type, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					rte_memory_order_release);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees that the adapter-specific value
		 * changes are observed before the state update.
		 */
		rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				rte_memory_order_release);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases.
		 * ACQUIRE ordering guarantees that the implementation-specific
		 * opaque data is accessed under the correct state.
		 */
		n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void *)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to make sure the state update is observed between
		 * threads.
		 */
		rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				rte_memory_order_release);
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
	.remaining_ticks_get = swtim_remaining_ticks_get,
};
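
/* Illustrative arm/cancel sketch, not part of the library: these ops are
 * reached through rte_event_timer_arm_burst() and
 * rte_event_timer_cancel_burst().  The queue id, sched type and tick count
 * below are assumptions for the example.
 *
 *	struct rte_event_timer evtim = {
 *		.ev.op = RTE_EVENT_OP_NEW,
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.event_type = RTE_EVENT_TYPE_TIMER,
 *		.ev.event_ptr = NULL,
 *		.state = RTE_EVENT_TIMER_NOT_ARMED,
 *		.timeout_ticks = 30,	// 30 adapter ticks from now
 *	};
 *	struct rte_event_timer *evtimp = &evtim;
 *
 *	if (rte_event_timer_arm_burst(adptr, &evtimp, 1) != 1)
 *		EVTIM_LOG_ERR("arm failed: %d", rte_errno);
 *	...
 *	if (rte_event_timer_cancel_burst(adptr, &evtimp, 1) != 1)
 *		EVTIM_LOG_ERR("cancel failed: %d", rte_errno);
 */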

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "min_resolution_ns",
				   adapter_info.min_resolution_ns);
	rte_tel_data_add_dict_uint(d, "max_tmo_ns", adapter_info.max_tmo_ns);
	rte_tel_data_add_dict_uint(d, "event_dev_id",
				   adapter_info.conf.event_dev_id);
	rte_tel_data_add_dict_uint(d, "socket_id",
				   adapter_info.conf.socket_id);
	rte_tel_data_add_dict_uint(d, "clk_src", adapter_info.conf.clk_src);
	rte_tel_data_add_dict_uint(d, "timer_tick_ns",
				   adapter_info.conf.timer_tick_ns);
	rte_tel_data_add_dict_uint(d, "nb_timers",
				   adapter_info.conf.nb_timers);
	rte_tel_data_add_dict_uint(d, "flags", adapter_info.conf.flags);

	return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_stats stats;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_stats_get(adapter, &stats);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "evtim_exp_count",
				   stats.evtim_exp_count);
	rte_tel_data_add_dict_uint(d, "ev_enq_count", stats.ev_enq_count);
	rte_tel_data_add_dict_uint(d, "ev_inv_count", stats.ev_inv_count);
	rte_tel_data_add_dict_uint(d, "evtim_retry_count",
				   stats.evtim_retry_count);
	rte_tel_data_add_dict_uint(d, "adapter_tick_count",
				   stats.adapter_tick_count);

	return 0;
}

RTE_INIT(ta_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/ta_info",
		handle_ta_info,
		"Returns Timer adapter info. Parameter: Timer adapter id");

	rte_telemetry_register_cmd("/eventdev/ta_stats",
		handle_ta_stats,
		"Returns Timer adapter stats. Parameter: Timer adapter id");
}
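
/* Illustrative query of the commands registered above via the standard
 * telemetry client (adapter id 0 is an assumption):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /eventdev/ta_stats,0
 *	{"/eventdev/ta_stats": {"timer_adapter_id": 0, ...}}
 */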