/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>
#include <rte_reciprocal.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
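/* The memzone name embeds the adapter ID so that
 * rte_event_timer_adapter_lookup() can rediscover the shared data area
 * for a given ID, e.g. from a secondary process.
 */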

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
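/* Note: EVTIM_LOG() appends a newline to every message, so format strings
 * passed to these macros should not include one themselves.
 */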

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static inline enum rte_timer_type
get_timer_type(const struct rte_event_timer_adapter *adapter)
{
	return (adapter->data->conf.flags &
			RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) ?
			PERIODICAL : SINGLE;
}

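/* Default callback for creating an event port for the adapter: stop the
 * event device if it is running, grow the port count by one, reconfigure
 * the device, set up the new port, and restart the device if needed.
 */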
static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, (port_id - 1),
						      port_conf);
		if (ret < 0)
			return ret;
	}

	dev_conf.nb_event_ports += 1;
	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
		dev_conf.nb_single_link_event_port_queues += 1;

	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}

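/* Usage sketch (illustrative only; the values below are hypothetical:
 * a 10 ms tick, a 3 minute maximum timeout and 40k timers):
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = event_dev_id,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 10 * 1000 * 1000,
 *		.max_tmo_ns = 180ULL * 1000 * 1000 * 1000,
 *		.nb_timers = 40000,
 *		.flags = 0,
 *	};
 *	struct rte_event_timer_adapter *adapter =
 *		rte_event_timer_adapter_create(&conf);
 */
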
struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
		conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	rte_eventdev_trace_timer_adapter_get_info(adapter, adapter_info);

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

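/* Rebuild a local adapter handle from the shared memzone reserved by
 * rte_event_timer_adapter_create_ext(), so a process that did not create
 * the adapter (e.g. a secondary process) can obtain a usable handle.
 */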
struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = EINVAL;
			return NULL;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_lookup(adapter_id, adapter);

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int i, ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	ret = 0;
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			ret = adapters[i].allocated;

	if (!ret) {
		rte_free(adapters);
		adapters = NULL;
	}

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (service_id == NULL)
		return -EINVAL;

	if (!adapter->data->service_inited)
		return -ESRCH;

	*service_id = adapter->data->service_id;

	rte_eventdev_trace_timer_adapter_service_id_get(adapter, *service_id);

	return 0;
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	rte_eventdev_trace_timer_adapter_stats_get(adapter, stats);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	rte_eventdev_trace_timer_adapter_stats_reset(adapter);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

int
rte_event_timer_remaining_ticks_get(
			const struct rte_event_timer_adapter *adapter,
			const struct rte_event_timer *evtim,
			uint64_t *ticks_remaining)
{
	rte_eventdev_trace_timer_remaining_ticks_get(adapter, evtim, ticks_remaining);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->remaining_ticks_get, -ENOTSUP);

	if (ticks_remaining == NULL)
		return -EINVAL;

	return adapter->ops->remaining_ticks_get(adapter, evtim,
						 ticks_remaining);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

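/* head and tail are free-running counters: the ring index is obtained by
 * masking with EVENT_BUFFER_MASK, and (head - tail) is the current
 * occupancy, which stays correct across unsigned wraparound because the
 * buffer size is a power of 2.
 */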
static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to the
	 * event device.
	 */
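	/* For example, with head = 4100 and tail = 4090, head_idx = 4 and
	 * tail_idx = 4090, so only the run events[4090..4095] is enqueued
	 * now; the wrapped remainder is picked up by a later flush.
	 */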
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by the adapter instance. May have been
	 * adjusted from what the user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		RTE_ATOMIC(uint16_t) v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	RTE_ATOMIC(unsigned int) poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	RTE_ATOMIC(int) n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;
	enum rte_timer_type type;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);
	type = get_timer_type(adapter);

	if (unlikely(sw->in_use[lcore].v == 0)) {
		sw->in_use[lcore].v = 1;
		n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
					     rte_memory_order_relaxed);
		rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore,
				rte_memory_order_relaxed);
	}

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		if (type == SINGLE) {
			/* If event buffer is full, put timer back in list with
			 * immediate expiry value, so that we process it again
			 * on the next iteration.
			 */
			ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0,
						SINGLE, lcore, NULL, evtim);
			if (ret < 0) {
				EVTIM_LOG_DBG("event buffer full, failed to "
						"reset timer with immediate "
						"expiry value");
			} else {
				sw->stats.evtim_retry_count++;
				EVTIM_LOG_DBG("event buffer full, resetting "
						"rte_timer with immediate "
						"expiry value");
			}
		} else {
			sw->stats.evtim_drop_count++;
		}

	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		/* Don't free rte_timer for a periodic event timer until
		 * it is cancelled
		 */
		if (type == SINGLE)
			sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		if (type == SINGLE)
			rte_atomic_store_explicit(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				rte_memory_order_release);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

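/* Convert an event timer's timeout_ticks into a CPU cycle count. Returns
 * 0 on success, -1 if the timeout exceeds the adapter's max_tmo_ns (too
 * late), or -2 if it is shorter than one adapter tick (too early); the
 * arm path maps these to RTE_EVENT_TIMER_ERROR_TOOLATE/_TOOEARLY.
 */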
static __rte_always_inline int
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter,
		   uint64_t *timeout_cycles)
{
	static struct rte_reciprocal_u64 nsecpersec_inverse;
	static uint64_t timer_hz;
	uint64_t rem_cycles, secs_cycles = 0;
	uint64_t secs, timeout_nsecs;
	uint64_t nsecpersec;
	struct swtim *sw;

	sw = swtim_pmd_priv(adapter);
	nsecpersec = (uint64_t)NSECPERSEC;

	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
	if (timeout_nsecs > sw->max_tmo_ns)
		return -1;
	if (timeout_nsecs < sw->timer_tick_ns)
		return -2;

	/* Set these values in the first invocation */
	if (!timer_hz) {
		timer_hz = rte_get_timer_hz();
		nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
	}

	/* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
	 * of whole seconds it contains and convert that value to a number
	 * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
	 * in order to avoid overflow when we later multiply by timer_hz.
	 */
	if (timeout_nsecs > nsecpersec) {
		secs = rte_reciprocal_divide_u64(timeout_nsecs,
						 &nsecpersec_inverse);
		secs_cycles = secs * timer_hz;
		timeout_nsecs -= secs * nsecpersec;
	}

	rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
					       &nsecpersec_inverse);

	*timeout_cycles = secs_cycles + rem_cycles;

	return 0;
}

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}

/* Check that the sched type of the event timer's event matches the sched
 * type of the destination event queue.
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

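	/* A return of -EOVERFLOW is taken to mean the queue is configured to
	 * accept all sched types, in which case any event timer sched type
	 * is compatible.
	 */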
	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	const uint64_t prior_enq_count = sw->stats.ev_enq_count;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     (unsigned int *)(uintptr_t)sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		sw->stats.adapter_tick_count++;
	}

	event_buffer_flush(&sw->buffer,
			   adapter->data->event_dev_id,
			   adapter->data->event_port_id,
			   &nb_evs_flushed,
			   &nb_evs_invalid);

	sw->stats.ev_enq_count += nb_evs_flushed;
	sw->stats.ev_inv_count += nb_evs_invalid;

	rte_event_maintain(adapter->data->event_dev_id,
			   adapter->data->event_port_id, 0);

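	/* Returning -EAGAIN indicates that no expiry events were enqueued
	 * this iteration; the service core framework can use this to account
	 * the call as idle rather than busy.
	 */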
	return prior_enq_count == sw->stats.ev_enq_count ? -EAGAIN : 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}
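
/* Worked example (assuming RTE_MAX_LCORE == 128): nb_requested = 40000
 * rounds nb_actual up to 65536, leaving 25536 spare objects; the largest
 * power of 2 with 128 * size < 25536 is 128, which also satisfies the
 * other two bounds, so a per-lcore cache size of 128 would be chosen.
 */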

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   (unsigned int *)(uintptr_t)sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_timer_data_dealloc(sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to deallocate timer data instance");
		return ret;
	}

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

static int
swtim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
			  const struct rte_event_timer *evtim,
			  uint64_t *ticks_remaining)
{
	uint64_t nsecs_per_adapter_tick, opaque, cycles_remaining;
	enum rte_event_timer_state n_state;
	double nsecs_per_cycle;
	struct rte_timer *tim;
	uint64_t cur_cycles;

	/* Check that timer is armed */
	n_state = rte_atomic_load_explicit(&evtim->state, rte_memory_order_acquire);
	if (n_state != RTE_EVENT_TIMER_ARMED)
		return -EINVAL;

	opaque = evtim->impl_opaque[0];
	tim = (struct rte_timer *)(uintptr_t)opaque;

	cur_cycles = rte_get_timer_cycles();
	if (cur_cycles > tim->expire) {
		*ticks_remaining = 0;
		return 0;
	}

	cycles_remaining = tim->expire - cur_cycles;
	nsecs_per_cycle = (double)NSECPERSEC / rte_get_timer_hz();
	nsecs_per_adapter_tick = adapter->data->conf.timer_tick_ns;

	*ticks_remaining = (uint64_t)ceil((cycles_remaining * nsecs_per_cycle) /
					  nsecs_per_adapter_tick);

	return 0;
}

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;
	enum rte_timer_type type = SINGLE;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list of
	 * the highest lcore to insert such timers into
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap prevents a race on the in_use flag
	 * between multiple non-EAL threads.
	 */
	if (unlikely(rte_atomic_compare_exchange_strong_explicit(&sw->in_use[lcore_id].v,
			&exp_state, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
					     rte_memory_order_relaxed);
		rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore_id,
				rte_memory_order_relaxed);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* update timer type for periodic adapter */
	type = get_timer_type(adapter);

	for (i = 0; i < nb_evtims; i++) {
		n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					rte_memory_order_relaxed);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

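		/* Stash the rte_timer and adapter pointers in the event
		 * timer's impl_opaque fields; swtim_callback() and
		 * swtim_remaining_ticks_get() recover them from there.
		 */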
		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
		if (unlikely(ret == -1)) {
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					rte_memory_order_relaxed);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					rte_memory_order_relaxed);
			rte_errno = EINVAL;
			break;
		}

		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  type, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			rte_atomic_store_explicit(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					rte_memory_order_release);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees that the adapter-specific value
		 * changes are observed before the update of state.
		 */
		rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				rte_memory_order_release);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		/* ACQUIRE ordering guarantees that the implementation-specific
		 * opaque data is accessed under the correct state.
		 */
		n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void *)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to make sure the state update is observed between
		 * threads.
		 */
		rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				rte_memory_order_release);
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
	.remaining_ticks_get = swtim_remaining_ticks_get,
};

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	/* Guard against telemetry queries made before any adapter exists */
	if (adapters == NULL)
		return -EINVAL;

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "min_resolution_ns",
				   adapter_info.min_resolution_ns);
	rte_tel_data_add_dict_uint(d, "max_tmo_ns", adapter_info.max_tmo_ns);
	rte_tel_data_add_dict_uint(d, "event_dev_id",
				   adapter_info.conf.event_dev_id);
	rte_tel_data_add_dict_uint(d, "socket_id",
				   adapter_info.conf.socket_id);
	rte_tel_data_add_dict_uint(d, "clk_src", adapter_info.conf.clk_src);
	rte_tel_data_add_dict_uint(d, "timer_tick_ns",
				   adapter_info.conf.timer_tick_ns);
	rte_tel_data_add_dict_uint(d, "nb_timers",
				   adapter_info.conf.nb_timers);
	rte_tel_data_add_dict_uint(d, "flags", adapter_info.conf.flags);

	return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_stats stats;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	/* Guard against telemetry queries made before any adapter exists */
	if (adapters == NULL)
		return -EINVAL;

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_stats_get(adapter, &stats);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "evtim_exp_count",
				   stats.evtim_exp_count);
	rte_tel_data_add_dict_uint(d, "ev_enq_count", stats.ev_enq_count);
	rte_tel_data_add_dict_uint(d, "ev_inv_count", stats.ev_inv_count);
	rte_tel_data_add_dict_uint(d, "evtim_retry_count",
				   stats.evtim_retry_count);
	rte_tel_data_add_dict_uint(d, "adapter_tick_count",
				   stats.adapter_tick_count);

	return 0;
}

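/* Example queries, e.g. via usertools/dpdk-telemetry.py (the adapter ID
 * follows the command after a comma):
 *   --> /eventdev/ta_info,0
 *   --> /eventdev/ta_stats,0
 */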
RTE_INIT(ta_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/ta_info",
		handle_ta_info,
		"Returns Timer adapter info. Parameter: Timer adapter id");

	rte_telemetry_register_cmd("/eventdev/ta_stats",
		handle_ta_stats,
		"Returns Timer adapter stats. Parameter: Timer adapter id");
}