1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation.
3 * All rights reserved.
4 */
5
6 #include <ctype.h>
7 #include <string.h>
8 #include <inttypes.h>
9 #include <stdalign.h>
10 #include <stdbool.h>
11 #include <stdlib.h>
12 #include <math.h>
13
14 #include <rte_memzone.h>
15 #include <rte_errno.h>
16 #include <rte_malloc.h>
17 #include <rte_mempool.h>
18 #include <rte_common.h>
19 #include <rte_timer.h>
20 #include <rte_service_component.h>
21 #include <rte_telemetry.h>
22 #include <rte_reciprocal.h>
23
24 #include "event_timer_adapter_pmd.h"
25 #include "eventdev_pmd.h"
26 #include "rte_event_timer_adapter.h"
27 #include "rte_eventdev.h"
28 #include "eventdev_trace.h"
29
30 #define DATA_MZ_NAME_MAX_LEN 64
31 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
32
33 RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
34 #define RTE_LOGTYPE_EVTIM evtim_logtype
35 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
36 #define RTE_LOGTYPE_EVTIM_BUF evtim_buffer_logtype
37 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
38 #define RTE_LOGTYPE_EVTIM_SVC evtim_svc_logtype
39
40 static struct rte_event_timer_adapter *adapters;
41
42 static const struct event_timer_adapter_ops swtim_ops;
43
44 #define EVTIM_LOG(level, logtype, ...) \
45 RTE_LOG_LINE_PREFIX(level, logtype, \
46 "EVTIMER: %s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
47
48 #define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, EVTIM, __VA_ARGS__)
49
50 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
51 #define EVTIM_LOG_DBG(...) \
52 EVTIM_LOG(DEBUG, EVTIM, __VA_ARGS__)
53 #define EVTIM_BUF_LOG_DBG(...) \
54 EVTIM_LOG(DEBUG, EVTIM_BUF, __VA_ARGS__)
55 #define EVTIM_SVC_LOG_DBG(...) \
56 EVTIM_LOG(DEBUG, EVTIM_SVC, __VA_ARGS__)
57 #else
58 #define EVTIM_LOG_DBG(...) (void)0
59 #define EVTIM_BUF_LOG_DBG(...) (void)0
60 #define EVTIM_SVC_LOG_DBG(...) (void)0
61 #endif
62
63 static inline enum rte_timer_type
64 get_timer_type(const struct rte_event_timer_adapter *adapter)
65 {
66 return (adapter->data->conf.flags &
67 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) ?
68 PERIODICAL : SINGLE;
69 }
70
71 static int
72 default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
73 void *conf_arg)
74 {
75 struct rte_event_timer_adapter *adapter;
76 struct rte_eventdev *dev;
77 struct rte_event_dev_config dev_conf;
78 struct rte_event_port_conf *port_conf, def_port_conf = {0};
79 int started;
80 uint8_t port_id;
81 uint8_t dev_id;
82 int ret;
83
84 RTE_SET_USED(event_dev_id);
85
86 adapter = &adapters[id];
87 dev = &rte_eventdevs[adapter->data->event_dev_id];
88 dev_id = dev->data->dev_id;
89 dev_conf = dev->data->dev_conf;
90
91 started = dev->data->dev_started;
92 if (started)
93 rte_event_dev_stop(dev_id);
94
95 port_id = dev_conf.nb_event_ports;
96 if (conf_arg != NULL)
97 port_conf = conf_arg;
98 else {
99 port_conf = &def_port_conf;
100 ret = rte_event_port_default_conf_get(dev_id, (port_id - 1),
101 port_conf);
102 if (ret < 0)
103 return ret;
104 }
105
106 dev_conf.nb_event_ports += 1;
107 if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
108 dev_conf.nb_single_link_event_port_queues += 1;
109
110 ret = rte_event_dev_configure(dev_id, &dev_conf);
111 if (ret < 0) {
112 EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
113 if (started)
114 if (rte_event_dev_start(dev_id))
115 return -EIO;
116
117 return ret;
118 }
119
120 ret = rte_event_port_setup(dev_id, port_id, port_conf);
121 if (ret < 0) {
122 EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
123 port_id, dev_id);
124 return ret;
125 }
126
127 *event_port_id = port_id;
128
129 if (started)
130 ret = rte_event_dev_start(dev_id);
131
132 return ret;
133 }
134
135 struct rte_event_timer_adapter *
136 rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
137 {
138 return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
139 NULL);
140 }
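/*
 * Illustrative application-side usage of the create API (a sketch, not part
 * of this library; EV_DEV_ID and the numeric values below are placeholders
 * chosen for the example):
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = EV_DEV_ID,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 100000,
 *		.max_tmo_ns = 10000000000ULL,
 *		.nb_timers = 1000,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adptr =
 *		rte_event_timer_adapter_create(&conf);
 *
 *	if (adptr == NULL)
 *		rte_panic("adapter creation failed: %d\n", rte_errno);
 */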
141
142 struct rte_event_timer_adapter *
143 rte_event_timer_adapter_create_ext(
144 const struct rte_event_timer_adapter_conf *conf,
145 rte_event_timer_adapter_port_conf_cb_t conf_cb,
146 void *conf_arg)
147 {
148 uint16_t adapter_id;
149 struct rte_event_timer_adapter *adapter;
150 const struct rte_memzone *mz;
151 char mz_name[DATA_MZ_NAME_MAX_LEN];
152 int n, ret;
153 struct rte_eventdev *dev;
154
155 if (adapters == NULL) {
156 adapters = rte_zmalloc("Eventdev",
157 sizeof(struct rte_event_timer_adapter) *
158 RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
159 RTE_CACHE_LINE_SIZE);
160 if (adapters == NULL) {
161 rte_errno = ENOMEM;
162 return NULL;
163 }
164 }
165
166 if (conf == NULL) {
167 rte_errno = EINVAL;
168 return NULL;
169 }
170
171 /* Check eventdev ID */
172 if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
173 rte_errno = EINVAL;
174 return NULL;
175 }
176 dev = &rte_eventdevs[conf->event_dev_id];
177
178 adapter_id = conf->timer_adapter_id;
179
180 /* Check that adapter_id is in range */
181 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
182 rte_errno = EINVAL;
183 return NULL;
184 }
185
186 /* Check adapter ID not already allocated */
187 adapter = &adapters[adapter_id];
188 if (adapter->allocated) {
189 rte_errno = EEXIST;
190 return NULL;
191 }
192
193 /* Create shared data area. */
194 n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
195 if (n >= (int)sizeof(mz_name)) {
196 rte_errno = EINVAL;
197 return NULL;
198 }
199 mz = rte_memzone_reserve(mz_name,
200 sizeof(struct rte_event_timer_adapter_data),
201 conf->socket_id, 0);
202 if (mz == NULL)
203 /* rte_errno set by rte_memzone_reserve */
204 return NULL;
205
206 adapter->data = mz->addr;
207 memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
208
209 adapter->data->mz = mz;
210 adapter->data->event_dev_id = conf->event_dev_id;
211 adapter->data->id = adapter_id;
212 adapter->data->socket_id = conf->socket_id;
213 adapter->data->conf = *conf; /* copy conf structure */
214
215 /* Query eventdev PMD for timer adapter capabilities and ops */
216 if (dev->dev_ops->timer_adapter_caps_get) {
217 ret = dev->dev_ops->timer_adapter_caps_get(dev,
218 adapter->data->conf.flags,
219 &adapter->data->caps, &adapter->ops);
220 if (ret < 0) {
221 rte_errno = -ret;
222 goto free_memzone;
223 }
224 }
225
226 if (!(adapter->data->caps &
227 RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
228 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
229 ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
230 &adapter->data->event_port_id, conf_arg);
231 if (ret < 0) {
232 rte_errno = -ret;
233 goto free_memzone;
234 }
235 }
236
237 /* If eventdev PMD did not provide ops, use default software
238 * implementation.
239 */
240 if (adapter->ops == NULL)
241 adapter->ops = &swtim_ops;
242
243 /* Allow driver to do some setup */
244 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
245 ret = adapter->ops->init(adapter);
246 if (ret < 0) {
247 rte_errno = -ret;
248 goto free_memzone;
249 }
250
251 /* Set fast-path function pointers */
252 adapter->arm_burst = adapter->ops->arm_burst;
253 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
254 adapter->cancel_burst = adapter->ops->cancel_burst;
255
256 adapter->allocated = 1;
257
258 rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
259 conf_cb);
260 return adapter;
261
262 free_memzone:
263 rte_memzone_free(adapter->data->mz);
264 return NULL;
265 }
266
267 int
268 rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
269 struct rte_event_timer_adapter_info *adapter_info)
270 {
271 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
272
273 if (adapter->ops->get_info)
274 /* let driver set values it knows */
275 adapter->ops->get_info(adapter, adapter_info);
276
277 /* Set common values */
278 adapter_info->conf = adapter->data->conf;
279 adapter_info->event_dev_port_id = adapter->data->event_port_id;
280 adapter_info->caps = adapter->data->caps;
281
282 rte_eventdev_trace_timer_adapter_get_info(adapter, adapter_info);
283
284 return 0;
285 }
286
287 int
288 rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
289 {
290 int ret;
291
292 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
293 FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
294
295 if (adapter->data->started) {
296 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
297 adapter->data->id);
298 return -EALREADY;
299 }
300
301 ret = adapter->ops->start(adapter);
302 if (ret < 0)
303 return ret;
304
305 adapter->data->started = 1;
306 rte_eventdev_trace_timer_adapter_start(adapter);
307 return 0;
308 }
309
310 int
311 rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
312 {
313 int ret;
314
315 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
316 FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
317
318 if (adapter->data->started == 0) {
319 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
320 adapter->data->id);
321 return 0;
322 }
323
324 ret = adapter->ops->stop(adapter);
325 if (ret < 0)
326 return ret;
327
328 adapter->data->started = 0;
329 rte_eventdev_trace_timer_adapter_stop(adapter);
330 return 0;
331 }
332
333 struct rte_event_timer_adapter *
334 rte_event_timer_adapter_lookup(uint16_t adapter_id)
335 {
336 char name[DATA_MZ_NAME_MAX_LEN];
337 const struct rte_memzone *mz;
338 struct rte_event_timer_adapter_data *data;
339 struct rte_event_timer_adapter *adapter;
340 int ret;
341 struct rte_eventdev *dev;
342
343 if (adapters == NULL) {
344 adapters = rte_zmalloc("Eventdev",
345 sizeof(struct rte_event_timer_adapter) *
346 RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
347 RTE_CACHE_LINE_SIZE);
348 if (adapters == NULL) {
349 rte_errno = ENOMEM;
350 return NULL;
351 }
352 }
353
354 if (adapters[adapter_id].allocated)
355 return &adapters[adapter_id]; /* Adapter is already loaded */
356
357 snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
358 mz = rte_memzone_lookup(name);
359 if (mz == NULL) {
360 rte_errno = ENOENT;
361 return NULL;
362 }
363
364 data = mz->addr;
365
366 adapter = &adapters[data->id];
367 adapter->data = data;
368
369 dev = &rte_eventdevs[adapter->data->event_dev_id];
370
371 /* Query eventdev PMD for timer adapter capabilities and ops */
372 if (dev->dev_ops->timer_adapter_caps_get) {
373 ret = dev->dev_ops->timer_adapter_caps_get(dev,
374 adapter->data->conf.flags,
375 &adapter->data->caps, &adapter->ops);
376 if (ret < 0) {
377 rte_errno = EINVAL;
378 return NULL;
379 }
380 }
381
382 /* If eventdev PMD did not provide ops, use default software
383 * implementation.
384 */
385 if (adapter->ops == NULL)
386 adapter->ops = &swtim_ops;
387
388 /* Set fast-path function pointers */
389 adapter->arm_burst = adapter->ops->arm_burst;
390 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
391 adapter->cancel_burst = adapter->ops->cancel_burst;
392
393 adapter->allocated = 1;
394
395 rte_eventdev_trace_timer_adapter_lookup(adapter_id, adapter);
396
397 return adapter;
398 }
399
400 int
401 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
402 {
403 int i, ret;
404
405 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
406 FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
407
408 if (adapter->data->started == 1) {
409 EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
410 "before freeing", adapter->data->id);
411 return -EBUSY;
412 }
413
414 /* free impl priv data */
415 ret = adapter->ops->uninit(adapter);
416 if (ret < 0)
417 return ret;
418
419 /* free shared data area */
420 ret = rte_memzone_free(adapter->data->mz);
421 if (ret < 0)
422 return ret;
423
424 adapter->data = NULL;
425 adapter->allocated = 0;
426
427 ret = 0;
428 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
429 if (adapters[i].allocated)
430 ret = adapters[i].allocated;
431
432 if (!ret) {
433 rte_free(adapters);
434 adapters = NULL;
435 }
436
437 rte_eventdev_trace_timer_adapter_free(adapter);
438 return 0;
439 }
440
441 int
442 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
443 uint32_t *service_id)
444 {
445 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
446
447 if (service_id == NULL)
448 return -EINVAL;
449
450 if (adapter->data->service_inited)
451 *service_id = adapter->data->service_id;
452
453 rte_eventdev_trace_timer_adapter_service_id_get(adapter, *service_id);
454
455 return adapter->data->service_inited ? 0 : -ESRCH;
456 }
457
458 int
459 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
460 struct rte_event_timer_adapter_stats *stats)
461 {
462 rte_eventdev_trace_timer_adapter_stats_get(adapter, stats);
463
464 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
465 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
466 if (stats == NULL)
467 return -EINVAL;
468
469 return adapter->ops->stats_get(adapter, stats);
470 }
471
472 int
473 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
474 {
475 rte_eventdev_trace_timer_adapter_stats_reset(adapter);
476
477 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
478 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
479 return adapter->ops->stats_reset(adapter);
480 }
481
482 int
483 rte_event_timer_remaining_ticks_get(
484 const struct rte_event_timer_adapter *adapter,
485 const struct rte_event_timer *evtim,
486 uint64_t *ticks_remaining)
487 {
488 rte_eventdev_trace_timer_remaining_ticks_get(adapter, evtim, ticks_remaining);
489
490 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
491 FUNC_PTR_OR_ERR_RET(adapter->ops->remaining_ticks_get, -ENOTSUP);
492
493 if (ticks_remaining == NULL)
494 return -EINVAL;
495
496 return adapter->ops->remaining_ticks_get(adapter, evtim,
497 ticks_remaining);
498 }
499
500 /*
501 * Software event timer adapter buffer helper functions
502 */
503
504 #define NSECPERSEC 1E9
505
506 /* Optimizations used to index into the buffer require that the buffer size
507 * be a power of 2.
508 */
509 #define EVENT_BUFFER_SZ 4096
510 #define EVENT_BUFFER_BATCHSZ 32
511 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
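/* Illustration: with EVENT_BUFFER_SZ = 4096, a free-running head count of,
 * say, 4100 indexes slot 4100 & EVENT_BUFFER_MASK = 4, the same result as
 * 4100 % 4096 but without a division. The head/tail counters are allowed to
 * wrap; their difference remains the fill level as long as it never exceeds
 * the buffer size.
 */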
512
513 #define EXP_TIM_BUF_SZ 128
514
515 struct __rte_cache_aligned event_buffer {
516 size_t head;
517 size_t tail;
518 struct rte_event events[EVENT_BUFFER_SZ];
519 };
520
521 static inline bool
522 event_buffer_full(struct event_buffer *bufp)
523 {
524 return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
525 }
526
527 static inline bool
528 event_buffer_batch_ready(struct event_buffer *bufp)
529 {
530 return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
531 }
532
533 static void
534 event_buffer_init(struct event_buffer *bufp)
535 {
536 bufp->head = bufp->tail = 0;
537 memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
538 }
539
540 static int
541 event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
542 {
543 size_t head_idx;
544 struct rte_event *buf_eventp;
545
546 if (event_buffer_full(bufp))
547 return -1;
548
549 /* Instead of modulus, bitwise AND with mask to get head_idx. */
550 head_idx = bufp->head & EVENT_BUFFER_MASK;
551 buf_eventp = &bufp->events[head_idx];
552 rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
553
554 /* Wrap automatically when overflow occurs. */
555 bufp->head++;
556
557 return 0;
558 }
559
560 static void
561 event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
562 uint16_t *nb_events_flushed,
563 uint16_t *nb_events_inv)
564 {
565 struct rte_event *events = bufp->events;
566 size_t head_idx, tail_idx;
567 uint16_t n = 0;
568
569 /* Instead of modulus, bitwise AND with mask to get index. */
570 head_idx = bufp->head & EVENT_BUFFER_MASK;
571 tail_idx = bufp->tail & EVENT_BUFFER_MASK;
572
573 RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
574
575 /* Determine the largest contiguous run we can attempt to enqueue to the
576 * event device.
577 */
578 if (head_idx > tail_idx)
579 n = head_idx - tail_idx;
580 else if (head_idx < tail_idx)
581 n = EVENT_BUFFER_SZ - tail_idx;
582 else if (event_buffer_full(bufp))
583 n = EVENT_BUFFER_SZ - tail_idx;
584 else {
585 *nb_events_flushed = 0;
586 return;
587 }
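/* Example (assumed counts): with tail = 4094 and head = 4100, the masked
 * indices are tail_idx = 4094 and head_idx = 4; the contiguous run therefore
 * ends at the buffer boundary and n = 4096 - 4094 = 2. The wrapped-around
 * events are picked up by a subsequent flush once tail passes the boundary.
 */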
588
589 n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
590 *nb_events_inv = 0;
591
592 *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
593 &events[tail_idx], n);
594 if (*nb_events_flushed != n) {
595 if (rte_errno == EINVAL) {
596 EVTIM_LOG_ERR("failed to enqueue invalid event - "
597 "dropping it");
598 (*nb_events_inv)++;
599 } else if (rte_errno == ENOSPC)
600 rte_pause();
601 }
602
603 if (*nb_events_flushed > 0)
604 EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
605 "device", *nb_events_flushed);
606
607 bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
608 }
609
610 /*
611 * Software event timer adapter implementation
612 */
613 struct swtim {
614 /* Identifier of service executing timer management logic. */
615 uint32_t service_id;
616 /* The cycle count at which the adapter should next tick */
617 uint64_t next_tick_cycles;
618 /* The tick resolution used by the adapter instance. May have been
619 * adjusted from what the user requested.
620 */
621 uint64_t timer_tick_ns;
622 /* Maximum timeout in nanoseconds allowed by adapter instance. */
623 uint64_t max_tmo_ns;
624 /* Buffered timer expiry events to be enqueued to an event device. */
625 struct event_buffer buffer;
626 /* Statistics */
627 struct rte_event_timer_adapter_stats stats;
628 /* Mempool of timer objects */
629 struct rte_mempool *tim_pool;
630 /* Back pointer for convenience */
631 struct rte_event_timer_adapter *adapter;
632 /* Identifier of timer data instance */
633 uint32_t timer_data_id;
634 /* Track which cores have actually armed a timer */
635 alignas(RTE_CACHE_LINE_SIZE) struct {
636 RTE_ATOMIC(uint16_t) v;
637 } in_use[RTE_MAX_LCORE];
638 /* Track which cores' timer lists should be polled */
639 RTE_ATOMIC(unsigned int) poll_lcores[RTE_MAX_LCORE];
640 /* The number of lists that should be polled */
641 RTE_ATOMIC(int) n_poll_lcores;
642 /* Timers which have expired and can be returned to a mempool */
643 struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
644 /* The number of timers that can be returned to a mempool */
645 size_t n_expired_timers;
646 };
647
648 static inline struct swtim *
649 swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
650 {
651 return adapter->data->adapter_priv;
652 }
653
654 static void
655 swtim_callback(struct rte_timer *tim)
656 {
657 struct rte_event_timer *evtim = tim->arg;
658 struct rte_event_timer_adapter *adapter;
659 unsigned int lcore = rte_lcore_id();
660 struct swtim *sw;
661 uint16_t nb_evs_flushed = 0;
662 uint16_t nb_evs_invalid = 0;
663 uint64_t opaque;
664 int ret;
665 int n_lcores;
666 enum rte_timer_type type;
667
668 opaque = evtim->impl_opaque[1];
669 adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
670 sw = swtim_pmd_priv(adapter);
671 type = get_timer_type(adapter);
672
673 if (unlikely(sw->in_use[lcore].v == 0)) {
674 sw->in_use[lcore].v = 1;
675 n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
676 rte_memory_order_relaxed);
677 rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore,
678 rte_memory_order_relaxed);
679 }
680
681 ret = event_buffer_add(&sw->buffer, &evtim->ev);
682 if (ret < 0) {
683 if (type == SINGLE) {
684 /* If event buffer is full, put timer back in list with
685 * immediate expiry value, so that we process it again
686 * on the next iteration.
687 */
688 ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0,
689 SINGLE, lcore, NULL, evtim);
690 if (ret < 0) {
691 EVTIM_LOG_DBG("event buffer full, failed to "
692 "reset timer with immediate "
693 "expiry value");
694 } else {
695 sw->stats.evtim_retry_count++;
696 EVTIM_LOG_DBG("event buffer full, resetting "
697 "rte_timer with immediate "
698 "expiry value");
699 }
700 } else {
701 sw->stats.evtim_drop_count++;
702 }
703
704 } else {
705 EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
706
707 /* Empty the buffer here, if necessary, to free older expired
708 * timers only
709 */
710 if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
711 rte_mempool_put_bulk(sw->tim_pool,
712 (void **)sw->expired_timers,
713 sw->n_expired_timers);
714 sw->n_expired_timers = 0;
715 }
716
717 /* Don't free rte_timer for a periodic event timer until
718 * it is cancelled
719 */
720 if (type == SINGLE)
721 sw->expired_timers[sw->n_expired_timers++] = tim;
722 sw->stats.evtim_exp_count++;
723
724 if (type == SINGLE)
725 rte_atomic_store_explicit(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
726 rte_memory_order_release);
727 }
728
729 if (event_buffer_batch_ready(&sw->buffer)) {
730 event_buffer_flush(&sw->buffer,
731 adapter->data->event_dev_id,
732 adapter->data->event_port_id,
733 &nb_evs_flushed,
734 &nb_evs_invalid);
735
736 sw->stats.ev_enq_count += nb_evs_flushed;
737 sw->stats.ev_inv_count += nb_evs_invalid;
738 }
739 }
740
741 static __rte_always_inline int
742 get_timeout_cycles(struct rte_event_timer *evtim,
743 const struct rte_event_timer_adapter *adapter,
744 uint64_t *timeout_cycles)
745 {
746 static struct rte_reciprocal_u64 nsecpersec_inverse;
747 static uint64_t timer_hz;
748 uint64_t rem_cycles, secs_cycles = 0;
749 uint64_t secs, timeout_nsecs;
750 uint64_t nsecpersec;
751 struct swtim *sw;
752
753 sw = swtim_pmd_priv(adapter);
754 nsecpersec = (uint64_t)NSECPERSEC;
755
756 timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
757 if (timeout_nsecs > sw->max_tmo_ns)
758 return -1;
759 if (timeout_nsecs < sw->timer_tick_ns)
760 return -2;
761
762 /* Set these values in the first invocation */
763 if (!timer_hz) {
764 timer_hz = rte_get_timer_hz();
765 nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
766 }
767
768 /* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
769 * of whole seconds it contains and convert that value to a number
770 * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
771 * in order to avoid overflow when we later multiply by timer_hz.
772 */
773 if (timeout_nsecs > nsecpersec) {
774 secs = rte_reciprocal_divide_u64(timeout_nsecs,
775 &nsecpersec_inverse);
776 secs_cycles = secs * timer_hz;
777 timeout_nsecs -= secs * nsecpersec;
778 }
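/* Worked example (assumed clock): at timer_hz = 3 GHz, a 10.5 s timeout
 * multiplied directly by timer_hz would overflow 64 bits
 * (1.05e10 ns * 3e9 > UINT64_MAX). After the split, secs = 10 contributes
 * secs_cycles = 3e10, and only the remaining 5e8 ns is multiplied by timer_hz
 * below, which stays well within range before the division by NSECPERSEC.
 */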
779
780 rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
781 &nsecpersec_inverse);
782
783 *timeout_cycles = secs_cycles + rem_cycles;
784
785 return 0;
786 }
787
788 /* This function returns true if one or more (adapter) ticks have occurred since
789 * the last time it was called.
790 */
791 static inline bool
792 swtim_did_tick(struct swtim *sw)
793 {
794 uint64_t cycles_per_adapter_tick, start_cycles;
795 uint64_t *next_tick_cyclesp;
796
797 next_tick_cyclesp = &sw->next_tick_cycles;
798 cycles_per_adapter_tick = sw->timer_tick_ns *
799 (rte_get_timer_hz() / NSECPERSEC);
800 start_cycles = rte_get_timer_cycles();
801
802 /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
803 * execute, and set things going.
804 */
805
806 if (start_cycles >= *next_tick_cyclesp) {
807 /* Snap the current cycle count to the preceding adapter tick
808 * boundary.
809 */
810 start_cycles -= start_cycles % cycles_per_adapter_tick;
811 *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
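/* Example (assumed counts): with cycles_per_adapter_tick = 1000 and
 * start_cycles = 12345, the snap yields 12000 and the next tick becomes
 * due at 13000, regardless of how late within the current tick this call
 * happened to run.
 */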
812
813 return true;
814 }
815
816 return false;
817 }
818
819 /* Check that the sched type of the event timer's event matches the sched
820 * type of the destination event queue
821 */
822 static __rte_always_inline int
823 check_destination_event_queue(struct rte_event_timer *evtim,
824 const struct rte_event_timer_adapter *adapter)
825 {
826 int ret;
827 uint32_t sched_type;
828
829 ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
830 evtim->ev.queue_id,
831 RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
832 &sched_type);
833
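/* A return of -EOVERFLOW is taken to mean the destination queue was created
 * with RTE_EVENT_QUEUE_CFG_ALL_TYPES, in which case the schedule type
 * attribute does not apply and any sched type is acceptable.
 */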
834 if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
835 ret == -EOVERFLOW)
836 return 0;
837
838 return -1;
839 }
840
841 static int
842 swtim_service_func(void *arg)
843 {
844 struct rte_event_timer_adapter *adapter = arg;
845 struct swtim *sw = swtim_pmd_priv(adapter);
846 uint16_t nb_evs_flushed = 0;
847 uint16_t nb_evs_invalid = 0;
848 const uint64_t prior_enq_count = sw->stats.ev_enq_count;
849
850 if (swtim_did_tick(sw)) {
851 rte_timer_alt_manage(sw->timer_data_id,
852 (unsigned int *)(uintptr_t)sw->poll_lcores,
853 sw->n_poll_lcores,
854 swtim_callback);
855
856 /* Return expired timer objects back to mempool */
857 rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
858 sw->n_expired_timers);
859 sw->n_expired_timers = 0;
860
861 sw->stats.adapter_tick_count++;
862 }
863
864 event_buffer_flush(&sw->buffer,
865 adapter->data->event_dev_id,
866 adapter->data->event_port_id,
867 &nb_evs_flushed,
868 &nb_evs_invalid);
869
870 sw->stats.ev_enq_count += nb_evs_flushed;
871 sw->stats.ev_inv_count += nb_evs_invalid;
872
873 rte_event_maintain(adapter->data->event_dev_id,
874 adapter->data->event_port_id, 0);
875
876 return prior_enq_count == sw->stats.ev_enq_count ? -EAGAIN : 0;
877 }
878
879 /* The adapter initialization function rounds the mempool size up to the next
880 * power of 2, so we can take the difference between that value and what the
881 * user requested, and use the space for caches. This avoids a scenario where a
882 * user can't arm the number of timers the adapter was configured with because
883 * mempool objects have been lost to caches.
884 *
885 * nb_actual should always be a power of 2, so we can iterate over the powers
886 * of 2 to see what the largest cache size we can use is.
887 */
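/* For illustration (assumed numbers): a request for 1000 timers is rounded up
 * to nb_actual = 1024, leaving 24 spare objects, which is too few for any
 * per-lcore cache when RTE_MAX_LCORE is 128, so the computed cache size is 0.
 * A request for 3000 (nb_actual = 4096, 1096 spare) yields a cache size of 8
 * under the same assumption.
 */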
888 static int
889 compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
890 {
891 int i;
892 int size;
893 int cache_size = 0;
894
895 for (i = 0;; i++) {
896 size = 1 << i;
897
898 if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
899 size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
900 size <= nb_actual / 1.5)
901 cache_size = size;
902 else
903 break;
904 }
905
906 return cache_size;
907 }
908
909 static int
910 swtim_init(struct rte_event_timer_adapter *adapter)
911 {
912 int i, ret;
913 struct swtim *sw;
914 unsigned int flags;
915 struct rte_service_spec service;
916
917 /* Allocate storage for private data area */
918 #define SWTIM_NAMESIZE 32
919 char swtim_name[SWTIM_NAMESIZE];
920 snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
921 adapter->data->id);
922 sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
923 adapter->data->socket_id);
924 if (sw == NULL) {
925 EVTIM_LOG_ERR("failed to allocate space for private data");
926 rte_errno = ENOMEM;
927 return -1;
928 }
929
930 /* Connect storage to adapter instance */
931 adapter->data->adapter_priv = sw;
932 sw->adapter = adapter;
933
934 sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
935 sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;
936
937 /* Create a timer pool */
938 char pool_name[SWTIM_NAMESIZE];
939 snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
940 adapter->data->id);
941 /* Optimal mempool size is a power of 2 minus one */
942 uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
943 int pool_size = nb_timers - 1;
944 int cache_size = compute_msg_mempool_cache_size(
945 adapter->data->conf.nb_timers, nb_timers);
946 flags = 0; /* pool is multi-producer, multi-consumer */
947 sw->tim_pool = rte_mempool_create(pool_name, pool_size,
948 sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
949 NULL, NULL, adapter->data->socket_id, flags);
950 if (sw->tim_pool == NULL) {
951 EVTIM_LOG_ERR("failed to create timer object mempool");
952 rte_errno = ENOMEM;
953 goto free_alloc;
954 }
955
956 /* Initialize the variables that track in-use timer lists */
957 for (i = 0; i < RTE_MAX_LCORE; i++)
958 sw->in_use[i].v = 0;
959
960 /* Initialize the timer subsystem and allocate timer data instance */
961 ret = rte_timer_subsystem_init();
962 if (ret < 0) {
963 if (ret != -EALREADY) {
964 EVTIM_LOG_ERR("failed to initialize timer subsystem");
965 rte_errno = -ret;
966 goto free_mempool;
967 }
968 }
969
970 ret = rte_timer_data_alloc(&sw->timer_data_id);
971 if (ret < 0) {
972 EVTIM_LOG_ERR("failed to allocate timer data instance");
973 rte_errno = -ret;
974 goto free_mempool;
975 }
976
977 /* Initialize timer event buffer */
978 event_buffer_init(&sw->buffer);
979
980 sw->adapter = adapter;
981
982 /* Register a service component to run adapter logic */
983 memset(&service, 0, sizeof(service));
984 snprintf(service.name, RTE_SERVICE_NAME_MAX,
985 "swtim_svc_%"PRIu8, adapter->data->id);
986 service.socket_id = adapter->data->socket_id;
987 service.callback = swtim_service_func;
988 service.callback_userdata = adapter;
989 service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
990 ret = rte_service_component_register(&service, &sw->service_id);
991 if (ret < 0) {
992 EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
993 ": err = %d", service.name, sw->service_id,
994 ret);
995
996 rte_errno = ENOSPC;
997 goto free_mempool;
998 }
999
1000 EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
1001 sw->service_id);
1002
1003 adapter->data->service_id = sw->service_id;
1004 adapter->data->service_inited = 1;
1005
1006 return 0;
1007 free_mempool:
1008 rte_mempool_free(sw->tim_pool);
1009 free_alloc:
1010 rte_free(sw);
1011 return -1;
1012 }
1013
1014 static void
1015 swtim_free_tim(struct rte_timer *tim, void *arg)
1016 {
1017 struct swtim *sw = arg;
1018
1019 rte_mempool_put(sw->tim_pool, tim);
1020 }
1021
1022 /* Traverse the list of outstanding timers and put them back in the mempool
1023 * before freeing the adapter to avoid leaking the memory.
1024 */
1025 static int
1026 swtim_uninit(struct rte_event_timer_adapter *adapter)
1027 {
1028 int ret;
1029 struct swtim *sw = swtim_pmd_priv(adapter);
1030
1031 /* Free outstanding timers */
1032 rte_timer_stop_all(sw->timer_data_id,
1033 (unsigned int *)(uintptr_t)sw->poll_lcores,
1034 sw->n_poll_lcores,
1035 swtim_free_tim,
1036 sw);
1037
1038 ret = rte_timer_data_dealloc(sw->timer_data_id);
1039 if (ret < 0) {
1040 EVTIM_LOG_ERR("failed to deallocate timer data instance");
1041 return ret;
1042 }
1043
1044 ret = rte_service_component_unregister(sw->service_id);
1045 if (ret < 0) {
1046 EVTIM_LOG_ERR("failed to unregister service component");
1047 return ret;
1048 }
1049
1050 rte_mempool_free(sw->tim_pool);
1051 rte_free(sw);
1052 adapter->data->adapter_priv = NULL;
1053
1054 return 0;
1055 }
1056
1057 static inline int32_t
1058 get_mapped_count_for_service(uint32_t service_id)
1059 {
1060 int32_t core_count, i, mapped_count = 0;
1061 uint32_t lcore_arr[RTE_MAX_LCORE];
1062
1063 core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
1064
1065 for (i = 0; i < core_count; i++)
1066 if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
1067 mapped_count++;
1068
1069 return mapped_count;
1070 }
1071
1072 static int
1073 swtim_start(const struct rte_event_timer_adapter *adapter)
1074 {
1075 int mapped_count;
1076 struct swtim *sw = swtim_pmd_priv(adapter);
1077
1078 /* Mapping the service to more than one service core can introduce
1079 * delays while one thread is waiting to acquire a lock, so only allow
1080 * one core to be mapped to the service.
1081 *
1082 * Note: the service could be modified such that it spreads cores to
1083 * poll over multiple service instances.
1084 */
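/* Application-side sketch (SERVICE_LCORE_ID is a placeholder) for
 * satisfying this check before rte_event_timer_adapter_start():
 *
 *	uint32_t svc_id;
 *
 *	rte_event_timer_adapter_service_id_get(adapter, &svc_id);
 *	rte_service_lcore_add(SERVICE_LCORE_ID);
 *	rte_service_map_lcore_set(svc_id, SERVICE_LCORE_ID, 1);
 *	rte_service_lcore_start(SERVICE_LCORE_ID);
 */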
1085 mapped_count = get_mapped_count_for_service(sw->service_id);
1086
1087 if (mapped_count != 1)
1088 return mapped_count < 1 ? -ENOENT : -ENOTSUP;
1089
1090 return rte_service_component_runstate_set(sw->service_id, 1);
1091 }
1092
1093 static int
1094 swtim_stop(const struct rte_event_timer_adapter *adapter)
1095 {
1096 int ret;
1097 struct swtim *sw = swtim_pmd_priv(adapter);
1098
1099 ret = rte_service_component_runstate_set(sw->service_id, 0);
1100 if (ret < 0)
1101 return ret;
1102
1103 /* Wait for the service to complete its final iteration */
1104 while (rte_service_may_be_active(sw->service_id))
1105 rte_pause();
1106
1107 return 0;
1108 }
1109
1110 static void
1111 swtim_get_info(const struct rte_event_timer_adapter *adapter,
1112 struct rte_event_timer_adapter_info *adapter_info)
1113 {
1114 struct swtim *sw = swtim_pmd_priv(adapter);
1115 adapter_info->min_resolution_ns = sw->timer_tick_ns;
1116 adapter_info->max_tmo_ns = sw->max_tmo_ns;
1117 }
1118
1119 static int
1120 swtim_stats_get(const struct rte_event_timer_adapter *adapter,
1121 struct rte_event_timer_adapter_stats *stats)
1122 {
1123 struct swtim *sw = swtim_pmd_priv(adapter);
1124 *stats = sw->stats; /* structure copy */
1125 return 0;
1126 }
1127
1128 static int
1129 swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
1130 {
1131 struct swtim *sw = swtim_pmd_priv(adapter);
1132 memset(&sw->stats, 0, sizeof(sw->stats));
1133 return 0;
1134 }
1135
1136 static int
1137 swtim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
1138 const struct rte_event_timer *evtim,
1139 uint64_t *ticks_remaining)
1140 {
1141 uint64_t nsecs_per_adapter_tick, opaque, cycles_remaining;
1142 enum rte_event_timer_state n_state;
1143 double nsecs_per_cycle;
1144 struct rte_timer *tim;
1145 uint64_t cur_cycles;
1146
1147 /* Check that timer is armed */
1148 n_state = rte_atomic_load_explicit(&evtim->state, rte_memory_order_acquire);
1149 if (n_state != RTE_EVENT_TIMER_ARMED)
1150 return -EINVAL;
1151
1152 opaque = evtim->impl_opaque[0];
1153 tim = (struct rte_timer *)(uintptr_t)opaque;
1154
1155 cur_cycles = rte_get_timer_cycles();
1156 if (cur_cycles > tim->expire) {
1157 *ticks_remaining = 0;
1158 return 0;
1159 }
1160
1161 cycles_remaining = tim->expire - cur_cycles;
1162 nsecs_per_cycle = (double)NSECPERSEC / rte_get_timer_hz();
1163 nsecs_per_adapter_tick = adapter->data->conf.timer_tick_ns;
1164
1165 *ticks_remaining = (uint64_t)ceil((cycles_remaining * nsecs_per_cycle) /
1166 nsecs_per_adapter_tick);
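/* Example (assumed clock): at 2 GHz (0.5 ns per cycle), 1000000 remaining
 * cycles with a 100 us adapter tick yields ceil(500000 ns / 100000 ns) = 5
 * remaining adapter ticks.
 */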
1167
1168 return 0;
1169 }
1170
1171 static uint16_t
1172 __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1173 struct rte_event_timer **evtims,
1174 uint16_t nb_evtims)
1175 {
1176 int i, ret;
1177 struct swtim *sw = swtim_pmd_priv(adapter);
1178 uint32_t lcore_id = rte_lcore_id();
1179 struct rte_timer *tim, *tims[nb_evtims];
1180 uint64_t cycles;
1181 int n_lcores;
1182 /* Timer list for this lcore is not in use. */
1183 uint16_t exp_state = 0;
1184 enum rte_event_timer_state n_state;
1185 enum rte_timer_type type = SINGLE;
1186
1187 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1188 /* Check that the service is running. */
1189 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1190 rte_errno = EINVAL;
1191 return 0;
1192 }
1193 #endif
1194
1195 /* If this is a non-EAL thread, adjust lcore_id: arbitrarily pick the
1196 * timer list of the highest lcore to insert such timers into.
1197 */
1198 if (lcore_id == LCORE_ID_ANY)
1199 lcore_id = RTE_MAX_LCORE - 1;
1200
1201 /* If this is the first time we're arming an event timer on this lcore,
1202 * mark this lcore as "in use"; this will cause the service
1203 * function to process the timer list that corresponds to this lcore.
1204 * The atomic compare-and-swap operation can prevent the race condition
1205 * on in_use flag between multiple non-EAL threads.
1206 */
1207 if (unlikely(rte_atomic_compare_exchange_strong_explicit(&sw->in_use[lcore_id].v,
1208 &exp_state, 1,
1209 rte_memory_order_relaxed, rte_memory_order_relaxed))) {
1210 EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
1211 lcore_id);
1212 n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
1213 rte_memory_order_relaxed);
1214 rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore_id,
1215 rte_memory_order_relaxed);
1216 }
1217
1218 ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
1219 nb_evtims);
1220 if (ret < 0) {
1221 rte_errno = ENOSPC;
1222 return 0;
1223 }
1224
1225 /* update timer type for periodic adapter */
1226 type = get_timer_type(adapter);
1227
1228 for (i = 0; i < nb_evtims; i++) {
1229 n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
1230 if (n_state == RTE_EVENT_TIMER_ARMED) {
1231 rte_errno = EALREADY;
1232 break;
1233 } else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
1234 n_state == RTE_EVENT_TIMER_CANCELED)) {
1235 rte_errno = EINVAL;
1236 break;
1237 }
1238
1239 if (unlikely(check_destination_event_queue(evtims[i],
1240 adapter) < 0)) {
1241 rte_atomic_store_explicit(&evtims[i]->state,
1242 RTE_EVENT_TIMER_ERROR,
1243 rte_memory_order_relaxed);
1244 rte_errno = EINVAL;
1245 break;
1246 }
1247
1248 tim = tims[i];
1249 rte_timer_init(tim);
1250
1251 evtims[i]->impl_opaque[0] = (uintptr_t)tim;
1252 evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
1253
1254 ret = get_timeout_cycles(evtims[i], adapter, &cycles);
1255 if (unlikely(ret == -1)) {
1256 rte_atomic_store_explicit(&evtims[i]->state,
1257 RTE_EVENT_TIMER_ERROR_TOOLATE,
1258 rte_memory_order_relaxed);
1259 rte_errno = EINVAL;
1260 break;
1261 } else if (unlikely(ret == -2)) {
1262 rte_atomic_store_explicit(&evtims[i]->state,
1263 RTE_EVENT_TIMER_ERROR_TOOEARLY,
1264 rte_memory_order_relaxed);
1265 rte_errno = EINVAL;
1266 break;
1267 }
1268
1269 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
1270 type, lcore_id, NULL, evtims[i]);
1271 if (ret < 0) {
1272 /* tim was in RUNNING or CONFIG state */
1273 rte_atomic_store_explicit(&evtims[i]->state,
1274 RTE_EVENT_TIMER_ERROR,
1275 rte_memory_order_release);
1276 break;
1277 }
1278
1279 EVTIM_LOG_DBG("armed an event timer");
1280 /* RELEASE ordering guarantees that the adapter-specific value
1281 * changes are observed before the update of state.
1282 */
1283 rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
1284 rte_memory_order_release);
1285 }
1286
1287 if (i < nb_evtims)
1288 rte_mempool_put_bulk(sw->tim_pool,
1289 (void **)&tims[i], nb_evtims - i);
1290
1291 return i;
1292 }
1293
1294 static uint16_t
1295 swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1296 struct rte_event_timer **evtims,
1297 uint16_t nb_evtims)
1298 {
1299 return __swtim_arm_burst(adapter, evtims, nb_evtims);
1300 }
1301
1302 static uint16_t
1303 swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
1304 struct rte_event_timer **evtims,
1305 uint16_t nb_evtims)
1306 {
1307 int i, ret;
1308 struct rte_timer *timp;
1309 uint64_t opaque;
1310 struct swtim *sw = swtim_pmd_priv(adapter);
1311 enum rte_event_timer_state n_state;
1312
1313 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1314 /* Check that the service is running. */
1315 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1316 rte_errno = EINVAL;
1317 return 0;
1318 }
1319 #endif
1320
1321 for (i = 0; i < nb_evtims; i++) {
1322 /* Don't modify the event timer state in these cases */
1323 /* ACQUIRE ordering guarantees the access of implementation
1324 * specific opaque data under the correct state.
1325 */
1326 n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
1327 if (n_state == RTE_EVENT_TIMER_CANCELED) {
1328 rte_errno = EALREADY;
1329 break;
1330 } else if (n_state != RTE_EVENT_TIMER_ARMED) {
1331 rte_errno = EINVAL;
1332 break;
1333 }
1334
1335 opaque = evtims[i]->impl_opaque[0];
1336 timp = (struct rte_timer *)(uintptr_t)opaque;
1337 RTE_ASSERT(timp != NULL);
1338
1339 ret = rte_timer_alt_stop(sw->timer_data_id, timp);
1340 if (ret < 0) {
1341 /* Timer is running or being configured */
1342 rte_errno = EAGAIN;
1343 break;
1344 }
1345
1346 rte_mempool_put(sw->tim_pool, (void **)timp);
1347
1348 /* The RELEASE ordering here pairs with ACQUIRE loads of the state
1349 * elsewhere, so that data written before this update is observed by
1350 * other threads once they see the CANCELED state.
1351 */
1352 rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
1353 rte_memory_order_release);
1354 }
1355
1356 return i;
1357 }
1358
1359 static uint16_t
1360 swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
1361 struct rte_event_timer **evtims,
1362 uint64_t timeout_ticks,
1363 uint16_t nb_evtims)
1364 {
1365 int i;
1366
1367 for (i = 0; i < nb_evtims; i++)
1368 evtims[i]->timeout_ticks = timeout_ticks;
1369
1370 return __swtim_arm_burst(adapter, evtims, nb_evtims);
1371 }
1372
1373 static const struct event_timer_adapter_ops swtim_ops = {
1374 .init = swtim_init,
1375 .uninit = swtim_uninit,
1376 .start = swtim_start,
1377 .stop = swtim_stop,
1378 .get_info = swtim_get_info,
1379 .stats_get = swtim_stats_get,
1380 .stats_reset = swtim_stats_reset,
1381 .arm_burst = swtim_arm_burst,
1382 .arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
1383 .cancel_burst = swtim_cancel_burst,
1384 .remaining_ticks_get = swtim_remaining_ticks_get,
1385 };
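/*
 * The ops above are reached through the application-facing arm/cancel calls.
 * An illustrative application-side sketch of arming one timer (TEST_QUEUE_ID
 * and the tick count are placeholder values chosen for the example; adptr
 * refers to an adapter created as in the earlier sketch):
 *
 *	struct rte_event_timer evtim = {
 *		.ev.op = RTE_EVENT_OP_NEW,
 *		.ev.queue_id = TEST_QUEUE_ID,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.event_type = RTE_EVENT_TYPE_TIMER,
 *		.state = RTE_EVENT_TIMER_NOT_ARMED,
 *		.timeout_ticks = 30,
 *	};
 *	struct rte_event_timer *evtimp = &evtim;
 *	uint16_t armed = rte_event_timer_arm_burst(adptr, &evtimp, 1);
 *
 * On failure, armed is less than 1 and rte_errno holds the reason.
 */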
1386
1387 static int
1388 handle_ta_info(const char *cmd __rte_unused, const char *params,
1389 struct rte_tel_data *d)
1390 {
1391 struct rte_event_timer_adapter_info adapter_info;
1392 struct rte_event_timer_adapter *adapter;
1393 uint16_t adapter_id;
1394 int ret;
1395
1396 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1397 return -1;
1398
1399 adapter_id = atoi(params);
1400
1401 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
1402 EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
1403 return -EINVAL;
1404 }
1405
1406 adapter = &adapters[adapter_id];
1407
1408 ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
1409 if (ret < 0) {
1410 EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
1411 return ret;
1412 }
1413
1414 rte_tel_data_start_dict(d);
1415 rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
1416 rte_tel_data_add_dict_uint(d, "min_resolution_ns",
1417 adapter_info.min_resolution_ns);
1418 rte_tel_data_add_dict_uint(d, "max_tmo_ns", adapter_info.max_tmo_ns);
1419 rte_tel_data_add_dict_uint(d, "event_dev_id",
1420 adapter_info.conf.event_dev_id);
1421 rte_tel_data_add_dict_uint(d, "socket_id",
1422 adapter_info.conf.socket_id);
1423 rte_tel_data_add_dict_uint(d, "clk_src", adapter_info.conf.clk_src);
1424 rte_tel_data_add_dict_uint(d, "timer_tick_ns",
1425 adapter_info.conf.timer_tick_ns);
1426 rte_tel_data_add_dict_uint(d, "nb_timers",
1427 adapter_info.conf.nb_timers);
1428 rte_tel_data_add_dict_uint(d, "flags", adapter_info.conf.flags);
1429
1430 return 0;
1431 }
1432
1433 static int
1434 handle_ta_stats(const char *cmd __rte_unused, const char *params,
1435 struct rte_tel_data *d)
1436 {
1437 struct rte_event_timer_adapter_stats stats;
1438 struct rte_event_timer_adapter *adapter;
1439 uint16_t adapter_id;
1440 int ret;
1441
1442 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1443 return -1;
1444
1445 adapter_id = atoi(params);
1446
1447 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
1448 EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
1449 return -EINVAL;
1450 }
1451
1452 adapter = &adapters[adapter_id];
1453
1454 ret = rte_event_timer_adapter_stats_get(adapter, &stats);
1455 if (ret < 0) {
1456 EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
1457 return ret;
1458 }
1459
1460 rte_tel_data_start_dict(d);
1461 rte_tel_data_add_dict_uint(d, "timer_adapter_id", adapter_id);
1462 rte_tel_data_add_dict_uint(d, "evtim_exp_count",
1463 stats.evtim_exp_count);
1464 rte_tel_data_add_dict_uint(d, "ev_enq_count", stats.ev_enq_count);
1465 rte_tel_data_add_dict_uint(d, "ev_inv_count", stats.ev_inv_count);
1466 rte_tel_data_add_dict_uint(d, "evtim_retry_count",
1467 stats.evtim_retry_count);
1468 rte_tel_data_add_dict_uint(d, "adapter_tick_count",
1469 stats.adapter_tick_count);
1470
1471 return 0;
1472 }
1473
1474 RTE_INIT(ta_init_telemetry)
1475 {
1476 rte_telemetry_register_cmd("/eventdev/ta_info",
1477 handle_ta_info,
1478 "Returns Timer adapter info. Parameter: Timer adapter id");
1479
1480 rte_telemetry_register_cmd("/eventdev/ta_stats",
1481 handle_ta_stats,
1482 "Returns Timer adapter stats. Parameter: Timer adapter id");
1483 }
1484