xref: /dpdk/lib/eventdev/rte_event_timer_adapter.c (revision bc84182d6ae809d131467516056c7ea98bb4d961)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static inline enum rte_timer_type
get_timer_type(const struct rte_event_timer_adapter *adapter)
{
	return (adapter->data->conf.flags &
			RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) ?
			PERIODICAL : SINGLE;
}

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, (port_id - 1),
						      port_conf);
		if (ret < 0)
			return ret;
	}

	dev_conf.nb_event_ports += 1;
	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
		dev_conf.nb_single_link_event_port_queues += 1;

	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
		conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}
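
/*
 * Illustrative usage sketch (not part of this library): creating an adapter
 * backed by the software implementation on event device 0. The specific
 * values (1 ms tick, 100 s max timeout, 4096 timers) are example choices.
 *
 *	struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 1000000,
 *		.max_tmo_ns = 100000000000,
 *		.nb_timers = 4096,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adapter =
 *		rte_event_timer_adapter_create(&conf);
 *	if (adapter == NULL)
 *		rte_exit(EXIT_FAILURE, "create failed: %d\n", rte_errno);
 */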

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	rte_eventdev_trace_timer_adapter_get_info(adapter, adapter_info);

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = EINVAL;
			return NULL;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_lookup(adapter_id, adapter);

	return adapter;
}
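
/*
 * Illustrative sketch: a secondary process (or later code in the primary)
 * can reattach to an adapter created elsewhere via the shared memzone; the
 * adapter id 0 below is an example.
 *
 *	struct rte_event_timer_adapter *adapter =
 *		rte_event_timer_adapter_lookup(0);
 *	if (adapter == NULL)
 *		printf("lookup failed: %d\n", rte_errno);
 */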

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int i, ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	ret = 0;
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			ret = adapters[i].allocated;

	if (!ret) {
		rte_free(adapters);
		adapters = NULL;
	}

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (service_id == NULL)
		return -EINVAL;

	if (adapter->data->service_inited)
		*service_id = adapter->data->service_id;

	rte_eventdev_trace_timer_adapter_service_id_get(adapter, *service_id);

	return adapter->data->service_inited ? 0 : -ESRCH;
}
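
/*
 * Illustrative sketch: for the software implementation, the caller is
 * expected to map exactly one service lcore before starting the adapter
 * (see swtim_start() below); lcore 1 is an arbitrary example choice.
 *
 *	uint32_t service_id;
 *	if (rte_event_timer_adapter_service_id_get(adapter, &service_id) == 0) {
 *		rte_service_lcore_add(1);
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(1);
 *	}
 *	rte_event_timer_adapter_start(adapter);
 */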

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	rte_eventdev_trace_timer_adapter_stats_get(adapter, stats);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	rte_eventdev_trace_timer_adapter_stats_reset(adapter);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

int
rte_event_timer_remaining_ticks_get(
			const struct rte_event_timer_adapter *adapter,
			const struct rte_event_timer *evtim,
			uint64_t *ticks_remaining)
{
	rte_eventdev_trace_timer_remaining_ticks_get(adapter, evtim, ticks_remaining);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->remaining_ticks_get, -ENOTSUP);

	if (ticks_remaining == NULL)
		return -EINVAL;

	return adapter->ops->remaining_ticks_get(adapter, evtim,
						 ticks_remaining);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}
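
/*
 * Worked example of the free-running counters above: with
 * EVENT_BUFFER_SZ == 4096, head == 5000 and tail == 4090 give an occupancy
 * of head - tail == 910 even though head has passed the array bound; the
 * actual slots are recovered as head & EVENT_BUFFER_MASK == 904 and
 * tail & EVENT_BUFFER_MASK == 4090. The subtraction stays correct across
 * wraparound because both counters are unsigned.
 */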

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to the
	 * event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
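
/*
 * Worked example of the contiguous-run logic above: with tail_idx == 4090
 * and head_idx == 10, the pending events wrap past the end of the array, so
 * this call enqueues only the EVENT_BUFFER_SZ - tail_idx == 6 events up to
 * the array end; the 10 wrapped events are enqueued by a later flush, after
 * tail has advanced past the boundary.
 */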

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by adapter instance. May have been
	 * adjusted from what the user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		uint16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;
	enum rte_timer_type type;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);
	type = get_timer_type(adapter);

	if (unlikely(sw->in_use[lcore].v == 0)) {
		sw->in_use[lcore].v = 1;
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					     __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
				__ATOMIC_RELAXED);
	}

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		if (type == SINGLE) {
			/* If event buffer is full, put timer back in list with
			 * immediate expiry value, so that we process it again
			 * on the next iteration.
			 */
			ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0,
						  SINGLE, lcore, NULL, evtim);
			if (ret < 0) {
				EVTIM_LOG_DBG("event buffer full, failed to "
						"reset timer with immediate "
						"expiry value");
			} else {
				sw->stats.evtim_retry_count++;
				EVTIM_LOG_DBG("event buffer full, resetting "
						"rte_timer with immediate "
						"expiry value");
			}
		} else {
			sw->stats.evtim_drop_count++;
		}

	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		/* Don't free rte_timer for a periodic event timer until
		 * it is cancelled
		 */
		if (type == SINGLE)
			sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		if (type == SINGLE)
			__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				__ATOMIC_RELEASE);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}
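
/*
 * Worked example of the conversion above, assuming a 2 GHz timer clock
 * (rte_get_timer_hz() == 2e9) and a 1 ms adapter tick
 * (timer_tick_ns == 1e6): timeout_ticks == 100 gives timeout_ns == 1e8,
 * and 1e8 * 2e9 / 1e9 == 2e8 cycles.
 */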

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}
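
/*
 * Worked example, assuming cycles_per_adapter_tick == 1000: with
 * *next_tick_cyclesp == 5000, a call at cycle 7350 returns true, snaps the
 * count back to the tick boundary 7000, and schedules the next tick for
 * 8000; a subsequent call at cycle 7900 returns false.
 */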

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct swtim *sw = swtim_pmd_priv(adapter);

	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
	if (tmo_nsec > sw->max_tmo_ns)
		return -1;
	if (tmo_nsec < sw->timer_tick_ns)
		return -2;

	return 0;
}

/* Check that the event timer's sched type matches the sched type of the
 * destination event queue
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	const uint64_t prior_enq_count = sw->stats.ev_enq_count;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	rte_event_maintain(adapter->data->event_dev_id,
			   adapter->data->event_port_id, 0);

	return prior_enq_count == sw->stats.ev_enq_count ? -EAGAIN : 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}
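
/*
 * Worked example, assuming RTE_MAX_LCORE == 128: a request for 33000 timers
 * is rounded up to nb_actual == 65536, leaving 32536 spare objects. The
 * loop exits at size == 256 because 128 * 256 == 32768 is not less than
 * 32536, so the largest per-lcore cache size returned is 128, which also
 * satisfies the RTE_MEMPOOL_CACHE_MAX_SIZE and nb_actual / 1.5 bounds.
 */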

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	sw->adapter = adapter;

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_timer_data_dealloc(sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to deallocate timer data instance");
		return ret;
	}

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

static int
swtim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
			  const struct rte_event_timer *evtim,
			  uint64_t *ticks_remaining)
{
	uint64_t nsecs_per_adapter_tick, opaque, cycles_remaining;
	enum rte_event_timer_state n_state;
	double nsecs_per_cycle;
	struct rte_timer *tim;
	uint64_t cur_cycles;

	/* Check that timer is armed */
	n_state = __atomic_load_n(&evtim->state, __ATOMIC_ACQUIRE);
	if (n_state != RTE_EVENT_TIMER_ARMED)
		return -EINVAL;

	opaque = evtim->impl_opaque[0];
	tim = (struct rte_timer *)(uintptr_t)opaque;

	cur_cycles = rte_get_timer_cycles();
	if (cur_cycles > tim->expire) {
		*ticks_remaining = 0;
		return 0;
	}

	cycles_remaining = tim->expire - cur_cycles;
	nsecs_per_cycle = (double)NSECPERSEC / rte_get_timer_hz();
	nsecs_per_adapter_tick = adapter->data->conf.timer_tick_ns;

	*ticks_remaining = (uint64_t)ceil((cycles_remaining * nsecs_per_cycle) /
					  nsecs_per_adapter_tick);

	return 0;
}
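
/*
 * Worked example, assuming a 2 GHz timer clock (0.5 ns per cycle) and a
 * 100 ms (1e8 ns) adapter tick: a timer expiring 3e9 cycles from now has
 * 3e9 * 0.5 == 1.5e9 ns remaining, so ceil(1.5e9 / 1e8) == 15 adapter
 * ticks are reported.
 */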

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;
	enum rte_timer_type type = SINGLE;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list
	 * of the highest lcore to insert such timers into.
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap operation can prevent the race condition
	 * on in_use flag between multiple non-EAL threads.
	 */
	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
			&exp_state, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					     __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
				__ATOMIC_RELAXED);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* update timer type for periodic adapter */
	type = get_timer_type(adapter);

	for (i = 0; i < nb_evtims; i++) {
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (unlikely(ret == -1)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		cycles = get_timeout_cycles(evtims[i], adapter);
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  type, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELEASE);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees that the adapter-specific value
		 * changes are observed before the state update.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				__ATOMIC_RELEASE);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}
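
/*
 * Illustrative sketch of arming and cancelling a timer through the public
 * API; the event field values below are example choices.
 *
 *	struct rte_event_timer evtim = {
 *		.ev.op = RTE_EVENT_OP_NEW,
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.event_type = RTE_EVENT_TYPE_TIMER,
 *		.state = RTE_EVENT_TIMER_NOT_ARMED,
 *		.timeout_ticks = 30,
 *	};
 *	struct rte_event_timer *evtim_p = &evtim;
 *	if (rte_event_timer_arm_burst(adapter, &evtim_p, 1) != 1)
 *		printf("arm failed: %d, state %d\n", rte_errno, evtim.state);
 *	...
 *	if (rte_event_timer_cancel_burst(adapter, &evtim_p, 1) != 1)
 *		printf("cancel failed: %d\n", rte_errno);
 */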

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		/* ACQUIRE ordering guarantees that the implementation-specific
		 * opaque data is accessed under the correct state.
		 */
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void *)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to make sure the state update is observed between
		 * threads.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				__ATOMIC_RELEASE);
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
	.remaining_ticks_get = swtim_remaining_ticks_get,
};

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "min_resolution_ns",
				   adapter_info.min_resolution_ns);
	rte_tel_data_add_dict_uint(d, "max_tmo_ns", adapter_info.max_tmo_ns);
	rte_tel_data_add_dict_uint(d, "event_dev_id",
				   adapter_info.conf.event_dev_id);
	rte_tel_data_add_dict_uint(d, "socket_id",
				   adapter_info.conf.socket_id);
	rte_tel_data_add_dict_uint(d, "clk_src", adapter_info.conf.clk_src);
	rte_tel_data_add_dict_uint(d, "timer_tick_ns",
				   adapter_info.conf.timer_tick_ns);
	rte_tel_data_add_dict_uint(d, "nb_timers",
				   adapter_info.conf.nb_timers);
	rte_tel_data_add_dict_uint(d, "flags", adapter_info.conf.flags);

	return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_stats stats;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_stats_get(adapter, &stats);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "evtim_exp_count",
				   stats.evtim_exp_count);
	rte_tel_data_add_dict_uint(d, "ev_enq_count", stats.ev_enq_count);
	rte_tel_data_add_dict_uint(d, "ev_inv_count", stats.ev_inv_count);
	rte_tel_data_add_dict_uint(d, "evtim_retry_count",
				   stats.evtim_retry_count);
	rte_tel_data_add_dict_uint(d, "adapter_tick_count",
				   stats.adapter_tick_count);

	return 0;
}

RTE_INIT(ta_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/ta_info",
		handle_ta_info,
		"Returns Timer adapter info. Parameter: Timer adapter id");

	rte_telemetry_register_cmd("/eventdev/ta_stats",
		handle_ta_stats,
		"Returns Timer adapter stats. Parameter: Timer adapter id");
}
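
/*
 * Illustrative sketch: the commands registered above can be queried at
 * runtime with the standard telemetry client, e.g. for adapter id 0:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /eventdev/ta_info,0
 *	--> /eventdev/ta_stats,0
 */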