/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>
#include <rte_reciprocal.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static inline enum rte_timer_type
get_timer_type(const struct rte_event_timer_adapter *adapter)
{
	return (adapter->data->conf.flags &
			RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) ?
			PERIODICAL : SINGLE;
}

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, (port_id - 1),
						      port_conf);
		if (ret < 0)
			return ret;
	}

	dev_conf.nb_event_ports += 1;
	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
		dev_conf.nb_single_link_event_port_queues += 1;

	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
		conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}
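
/*
 * Illustrative usage sketch (not part of the library): creating and starting
 * an adapter on an already-configured event device. The device id, tick
 * resolution, and timer counts below are placeholder values.
 */
#if 0
static struct rte_event_timer_adapter *
example_setup_timer_adapter(void)
{
	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = 0,	/* assumed already-configured eventdev */
		.timer_adapter_id = 0,
		.socket_id = rte_socket_id(),
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = 10 * 1000 * 1000,		/* 10 ms tick */
		.max_tmo_ns = 180ULL * 1000 * 1000 * 1000,	/* 3 minutes */
		.nb_timers = 1000,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};
	struct rte_event_timer_adapter *adapter;

	/* No capability/ops from the PMD means the swtim_ops software
	 * implementation below is used.
	 */
	adapter = rte_event_timer_adapter_create(&conf);
	if (adapter == NULL)
		rte_panic("failed to create timer adapter: %d\n", rte_errno);
	if (rte_event_timer_adapter_start(adapter) < 0)
		rte_panic("failed to start timer adapter\n");

	return adapter;
}
#endif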

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	rte_eventdev_trace_timer_adapter_get_info(adapter, adapter_info);

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				       sizeof(struct rte_event_timer_adapter) *
					       RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				       RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = EINVAL;
			return NULL;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_lookup(adapter_id, adapter);

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int i, ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	ret = 0;
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		if (adapters[i].allocated)
			ret = adapters[i].allocated;

	if (!ret) {
		rte_free(adapters);
		adapters = NULL;
	}

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (service_id == NULL)
		return -EINVAL;

	if (!adapter->data->service_inited)
		return -ESRCH;

	*service_id = adapter->data->service_id;

	rte_eventdev_trace_timer_adapter_service_id_get(adapter, *service_id);

	return 0;
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	rte_eventdev_trace_timer_adapter_stats_get(adapter, stats);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	rte_eventdev_trace_timer_adapter_stats_reset(adapter);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

int
rte_event_timer_remaining_ticks_get(
			const struct rte_event_timer_adapter *adapter,
			const struct rte_event_timer *evtim,
			uint64_t *ticks_remaining)
{
	rte_eventdev_trace_timer_remaining_ticks_get(adapter, evtim, ticks_remaining);

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->remaining_ticks_get, -ENOTSUP);

	if (ticks_remaining == NULL)
		return -EINVAL;

	return adapter->ops->remaining_ticks_get(adapter, evtim,
						 ticks_remaining);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
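
/*
 * Illustrative note (not in the original source): because EVENT_BUFFER_SZ is
 * a power of 2, `pos & EVENT_BUFFER_MASK` is equivalent to
 * `pos % EVENT_BUFFER_SZ` but cheaper. The head and tail counters below
 * increment without bound and are masked only on access; e.g. head = 4100
 * maps to slot 4100 & 4095 = 4, and the difference (head - tail) remains the
 * number of buffered events even after the counters wrap.
 */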

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to the
	 * event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
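
/*
 * Illustrative worked example (not in the original source): a flush enqueues
 * at most one contiguous run per call. With tail = 4090 and head = 4100
 * (10 buffered events straddling the end of the array), tail_idx = 4090 and
 * head_idx = 4, so this call attempts the 6 events in slots 4090..4095; the
 * remaining 4 events in slots 0..3 are picked up by a subsequent call.
 */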

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by adapter instance. May have been
	 * adjusted from what user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		uint16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;
	enum rte_timer_type type;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);
	type = get_timer_type(adapter);

	if (unlikely(sw->in_use[lcore].v == 0)) {
		sw->in_use[lcore].v = 1;
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					      __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
				 __ATOMIC_RELAXED);
	}

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		if (type == SINGLE) {
			/* If event buffer is full, put timer back in list with
			 * immediate expiry value, so that we process it again
			 * on the next iteration.
			 */
			ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0,
						  SINGLE, lcore, NULL, evtim);
			if (ret < 0) {
				EVTIM_LOG_DBG("event buffer full, failed to "
						"reset timer with immediate "
						"expiry value");
			} else {
				sw->stats.evtim_retry_count++;
				EVTIM_LOG_DBG("event buffer full, resetting "
						"rte_timer with immediate "
						"expiry value");
			}
		} else {
			sw->stats.evtim_drop_count++;
		}

	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		/* Don't free rte_timer for a periodic event timer until
		 * it is cancelled
		 */
		if (type == SINGLE)
			sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		if (type == SINGLE)
			__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				__ATOMIC_RELEASE);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline int
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter,
		   uint64_t *timeout_cycles)
{
	static struct rte_reciprocal_u64 nsecpersec_inverse;
	static uint64_t timer_hz;
	uint64_t rem_cycles, secs_cycles = 0;
	uint64_t secs, timeout_nsecs;
	uint64_t nsecpersec;
	struct swtim *sw;

	sw = swtim_pmd_priv(adapter);
	nsecpersec = (uint64_t)NSECPERSEC;

	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
	if (timeout_nsecs > sw->max_tmo_ns)
		return -1;
	if (timeout_nsecs < sw->timer_tick_ns)
		return -2;

	/* Set these values in the first invocation */
	if (!timer_hz) {
		timer_hz = rte_get_timer_hz();
		nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
	}

	/* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
	 * of whole seconds it contains and convert that value to a number
	 * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
	 * in order to avoid overflow when we later multiply by timer_hz.
	 */
	if (timeout_nsecs > nsecpersec) {
		secs = rte_reciprocal_divide_u64(timeout_nsecs,
						 &nsecpersec_inverse);
		secs_cycles = secs * timer_hz;
		timeout_nsecs -= secs * nsecpersec;
	}

	rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
					       &nsecpersec_inverse);

	*timeout_cycles = secs_cycles + rem_cycles;

	return 0;
}
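
/*
 * Illustrative worked example (not in the original source): assume
 * rte_get_timer_hz() = 2.5e9 (a 2.5 GHz TSC), timer_tick_ns = 1e6, and
 * timeout_ticks = 3500, giving timeout_nsecs = 3.5e9. The whole-second part
 * is secs = 3, contributing secs_cycles = 3 * 2.5e9 = 7.5e9 cycles; the
 * remaining 0.5e9 ns contribute (0.5e9 * 2.5e9) / 1e9 = 1.25e9 cycles, so
 * *timeout_cycles = 8.75e9. Splitting out whole seconds first matters for
 * long timeouts: a 180 s timeout would otherwise form the product
 * 1.8e11 * 2.5e9 = 4.5e20, which overflows a uint64_t.
 */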

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}
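
/*
 * Illustrative worked example (not in the original source): with
 * cycles_per_adapter_tick = 1000 and a current cycle count of 5750, the
 * count snaps back to the tick boundary 5000 and the next tick is scheduled
 * at cycle 6000. A later call at cycle 5990 returns false; a call at cycle
 * 6020 returns true and re-arms the boundary at 7000.
 */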

/* Check that the event timer's sched type matches the sched type of the
 * destination event queue.
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	const uint64_t prior_enq_count = sw->stats.ev_enq_count;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	rte_event_maintain(adapter->data->event_dev_id,
			   adapter->data->event_port_id, 0);

	return prior_enq_count == sw->stats.ev_enq_count ? -EAGAIN : 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}
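
/*
 * Illustrative worked example (not in the original source): with
 * nb_requested = 40000, nb_actual rounds up to 65536, leaving a surplus of
 * 25536 objects. Assuming RTE_MAX_LCORE = 128, the loop accepts size = 1
 * (128 < 25536) up through size = 128 (128 * 128 = 16384 < 25536), then
 * stops at size = 256 because 128 * 256 = 32768 exceeds the surplus, so a
 * per-lcore cache of 128 objects is used. Caches therefore never strand
 * more objects than the rounding surplus.
 */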

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_timer_data_dealloc(sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to deallocate timer data instance");
		return ret;
	}

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

static int
swtim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
			  const struct rte_event_timer *evtim,
			  uint64_t *ticks_remaining)
{
	uint64_t nsecs_per_adapter_tick, opaque, cycles_remaining;
	enum rte_event_timer_state n_state;
	double nsecs_per_cycle;
	struct rte_timer *tim;
	uint64_t cur_cycles;

	/* Check that timer is armed */
	n_state = __atomic_load_n(&evtim->state, __ATOMIC_ACQUIRE);
	if (n_state != RTE_EVENT_TIMER_ARMED)
		return -EINVAL;

	opaque = evtim->impl_opaque[0];
	tim = (struct rte_timer *)(uintptr_t)opaque;

	cur_cycles = rte_get_timer_cycles();
	if (cur_cycles > tim->expire) {
		*ticks_remaining = 0;
		return 0;
	}

	cycles_remaining = tim->expire - cur_cycles;
	nsecs_per_cycle = (double)NSECPERSEC / rte_get_timer_hz();
	nsecs_per_adapter_tick = adapter->data->conf.timer_tick_ns;

	*ticks_remaining = (uint64_t)ceil((cycles_remaining * nsecs_per_cycle) /
					  nsecs_per_adapter_tick);

	return 0;
}
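
/*
 * Illustrative worked example (not in the original source): on a 2 GHz TSC,
 * nsecs_per_cycle = 1e9 / 2e9 = 0.5. If a timer expires 3.1e9 cycles from
 * now and the adapter tick is 1 ms (1e6 ns), the remaining time is
 * 3.1e9 * 0.5 = 1.55e9 ns, and ceil(1.55e9 / 1e6) = 1550 adapter ticks are
 * reported.
 */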

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;
	enum rte_timer_type type = SINGLE;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if this is a non-EAL thread; arbitrarily use the
	 * timer list of the highest lcore to insert such timers into.
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap operation can prevent the race condition
	 * on in_use flag between multiple non-EAL threads.
	 */
	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
			&exp_state, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					      __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
				 __ATOMIC_RELAXED);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* update timer type for periodic adapter */
	type = get_timer_type(adapter);

	for (i = 0; i < nb_evtims; i++) {
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
		if (unlikely(ret == -1)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  type, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELEASE);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees that the adapter-specific data
		 * written above is observed before the state update.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				__ATOMIC_RELEASE);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		/* ACQUIRE ordering guarantees that the implementation-specific
		 * opaque data is read only after the correct state is
		 * observed.
		 */
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void *)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to ensure the state update is observed between
		 * threads.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				__ATOMIC_RELEASE);
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
	.remaining_ticks_get = swtim_remaining_ticks_get,
};
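
/*
 * Illustrative usage sketch (not part of the library): arming and cancelling
 * a single event timer through the public burst API, which dispatches to the
 * arm_burst/cancel_burst ops above. The queue id and timeout are placeholder
 * values, and `adapter` is assumed to have been created and started as in
 * the earlier sketch.
 */
#if 0
static void
example_arm_and_cancel(struct rte_event_timer_adapter *adapter)
{
	struct rte_event_timer evtim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = 30,	/* 30 adapter ticks from now */
	};
	struct rte_event_timer *evtims[] = { &evtim };

	if (rte_event_timer_arm_burst(adapter, evtims, 1) != 1)
		printf("failed to arm timer: %d\n", rte_errno);

	/* ... later, before expiry, the timer may be cancelled: */
	if (rte_event_timer_cancel_burst(adapter, evtims, 1) != 1)
		printf("failed to cancel timer: %d\n", rte_errno);
}
#endif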

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "min_resolution_ns",
				   adapter_info.min_resolution_ns);
	rte_tel_data_add_dict_uint(d, "max_tmo_ns", adapter_info.max_tmo_ns);
	rte_tel_data_add_dict_uint(d, "event_dev_id",
				   adapter_info.conf.event_dev_id);
	rte_tel_data_add_dict_uint(d, "socket_id",
				   adapter_info.conf.socket_id);
	rte_tel_data_add_dict_uint(d, "clk_src", adapter_info.conf.clk_src);
	rte_tel_data_add_dict_uint(d, "timer_tick_ns",
				   adapter_info.conf.timer_tick_ns);
	rte_tel_data_add_dict_uint(d, "nb_timers",
				   adapter_info.conf.nb_timers);
	rte_tel_data_add_dict_uint(d, "flags", adapter_info.conf.flags);

	return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_stats stats;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_stats_get(adapter, &stats);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_uint(d, "evtim_exp_count",
				   stats.evtim_exp_count);
	rte_tel_data_add_dict_uint(d, "ev_enq_count", stats.ev_enq_count);
	rte_tel_data_add_dict_uint(d, "ev_inv_count", stats.ev_inv_count);
	rte_tel_data_add_dict_uint(d, "evtim_retry_count",
				   stats.evtim_retry_count);
	rte_tel_data_add_dict_uint(d, "adapter_tick_count",
				   stats.adapter_tick_count);

	return 0;
}

RTE_INIT(ta_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/ta_info",
		handle_ta_info,
		"Returns Timer adapter info. Parameter: Timer adapter id");

	rte_telemetry_register_cmd("/eventdev/ta_stats",
		handle_ta_stats,
		"Returns Timer adapter stats. Parameter: Timer adapter id");
}
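
/*
 * Illustrative usage note (not part of the library): once registered, these
 * telemetry endpoints can be queried from the dpdk-telemetry.py client
 * shipped in usertools; e.g. requesting "/eventdev/ta_stats,0" returns a
 * JSON dictionary with the fields populated by handle_ta_stats() above for
 * timer adapter id 0.
 */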