/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static inline enum rte_timer_type
get_timer_type(const struct rte_event_timer_adapter *adapter)
{
	return (adapter->data->conf.flags &
		RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) ?
			PERIODICAL : SINGLE;
}

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, (port_id - 1),
						      port_conf);
		if (ret < 0)
			return ret;
	}

	dev_conf.nb_event_ports += 1;
	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
		dev_conf.nb_single_link_event_port_queues += 1;

	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}
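
/* Illustrative sketch only, not part of the library: an application that
 * wants control over the event port the adapter enqueues to can supply its
 * own callback and argument through the _ext variant below, instead of the
 * default_port_conf_cb wired in above with conf_arg == NULL. The callback
 * and names used here are hypothetical.
 *
 *	static int
 *	my_port_conf_cb(uint16_t id, uint8_t event_dev_id,
 *			uint8_t *event_port_id, void *conf_arg)
 *	{
 *		// Reconfigure the event device with one extra port, set the
 *		// port up with the rte_event_port_conf passed in conf_arg,
 *		// and return the new port id via *event_port_id.
 *	}
 *
 *	adapter = rte_event_timer_adapter_create_ext(&conf, my_port_conf_cb,
 *						     &my_port_conf);
 *
 * Note that default_port_conf_cb itself also honours a non-NULL conf_arg:
 * when one is supplied it is used verbatim as the new port's configuration.
 */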

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (adapters == NULL) {
		adapters = rte_zmalloc("Eventdev",
				sizeof(struct rte_event_timer_adapter) *
					RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
				RTE_CACHE_LINE_SIZE);
		if (adapters == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
			sizeof(struct rte_event_timer_adapter_data),
			conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf; /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	if (dev->dev_ops->timer_adapter_caps_get) {
		ret = dev->dev_ops->timer_adapter_caps_get(dev,
				adapter->data->conf.flags,
				&adapter->data->caps, &adapter->ops);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
				&adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
			conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
				adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped", 314 adapter->data->id); 315 return 0; 316 } 317 318 ret = adapter->ops->stop(adapter); 319 if (ret < 0) 320 return ret; 321 322 adapter->data->started = 0; 323 rte_eventdev_trace_timer_adapter_stop(adapter); 324 return 0; 325 } 326 327 struct rte_event_timer_adapter * 328 rte_event_timer_adapter_lookup(uint16_t adapter_id) 329 { 330 char name[DATA_MZ_NAME_MAX_LEN]; 331 const struct rte_memzone *mz; 332 struct rte_event_timer_adapter_data *data; 333 struct rte_event_timer_adapter *adapter; 334 int ret; 335 struct rte_eventdev *dev; 336 337 if (adapters == NULL) { 338 adapters = rte_zmalloc("Eventdev", 339 sizeof(struct rte_event_timer_adapter) * 340 RTE_EVENT_TIMER_ADAPTER_NUM_MAX, 341 RTE_CACHE_LINE_SIZE); 342 if (adapters == NULL) { 343 rte_errno = ENOMEM; 344 return NULL; 345 } 346 } 347 348 if (adapters[adapter_id].allocated) 349 return &adapters[adapter_id]; /* Adapter is already loaded */ 350 351 snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id); 352 mz = rte_memzone_lookup(name); 353 if (mz == NULL) { 354 rte_errno = ENOENT; 355 return NULL; 356 } 357 358 data = mz->addr; 359 360 adapter = &adapters[data->id]; 361 adapter->data = data; 362 363 dev = &rte_eventdevs[adapter->data->event_dev_id]; 364 365 /* Query eventdev PMD for timer adapter capabilities and ops */ 366 if (dev->dev_ops->timer_adapter_caps_get) { 367 ret = dev->dev_ops->timer_adapter_caps_get(dev, 368 adapter->data->conf.flags, 369 &adapter->data->caps, &adapter->ops); 370 if (ret < 0) { 371 rte_errno = EINVAL; 372 return NULL; 373 } 374 } 375 376 /* If eventdev PMD did not provide ops, use default software 377 * implementation. 378 */ 379 if (adapter->ops == NULL) 380 adapter->ops = &swtim_ops; 381 382 /* Set fast-path function pointers */ 383 adapter->arm_burst = adapter->ops->arm_burst; 384 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst; 385 adapter->cancel_burst = adapter->ops->cancel_burst; 386 387 adapter->allocated = 1; 388 389 return adapter; 390 } 391 392 int 393 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter) 394 { 395 int i, ret; 396 397 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL); 398 FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL); 399 400 if (adapter->data->started == 1) { 401 EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped " 402 "before freeing", adapter->data->id); 403 return -EBUSY; 404 } 405 406 /* free impl priv data */ 407 ret = adapter->ops->uninit(adapter); 408 if (ret < 0) 409 return ret; 410 411 /* free shared data area */ 412 ret = rte_memzone_free(adapter->data->mz); 413 if (ret < 0) 414 return ret; 415 416 adapter->data = NULL; 417 adapter->allocated = 0; 418 419 ret = 0; 420 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) 421 if (adapters[i].allocated) 422 ret = adapters[i].allocated; 423 424 if (!ret) { 425 rte_free(adapters); 426 adapters = NULL; 427 } 428 429 rte_eventdev_trace_timer_adapter_free(adapter); 430 return 0; 431 } 432 433 int 434 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter, 435 uint32_t *service_id) 436 { 437 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL); 438 439 if (adapter->data->service_inited && service_id != NULL) 440 *service_id = adapter->data->service_id; 441 442 return adapter->data->service_inited ? 
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

int
rte_event_timer_remaining_ticks_get(
		const struct rte_event_timer_adapter *adapter,
		const struct rte_event_timer *evtim,
		uint64_t *ticks_remaining)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->remaining_ticks_get, -ENOTSUP);

	if (ticks_remaining == NULL)
		return -EINVAL;

	return adapter->ops->remaining_ticks_get(adapter, evtim,
						 ticks_remaining);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}
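
/* Illustrative note, not used by the code: head and tail are free-running
 * counters and only their difference and masked values are consumed, so the
 * eventual unsigned wrap-around is harmless as long as EVENT_BUFFER_SZ is a
 * power of 2. A worked example with EVENT_BUFFER_SZ = 4096 (mask 0xfff):
 *
 *	head = 4098, tail = 4095
 *	head - tail              -> 3 events currently buffered
 *	head & EVENT_BUFFER_MASK -> 2    (slot the next add writes)
 *	tail & EVENT_BUFFER_MASK -> 4095 (slot the next flush reads)
 *
 * This is why the masking trick above can stand in for a modulus: with a
 * non-power-of-2 size the masked index would be wrong after the counters
 * wrap.
 */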

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		uint16_t *nb_events_flushed,
		uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to
	 * the event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
					"dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				"device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by adapter instance. May have been
	 * adjusted from what user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		uint16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;
	enum rte_timer_type type;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);
	type = get_timer_type(adapter);

	if (unlikely(sw->in_use[lcore].v == 0)) {
		sw->in_use[lcore].v = 1;
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					      __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
				 __ATOMIC_RELAXED);
	}

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		if (type == SINGLE) {
			/* If event buffer is full, put timer back in list with
			 * immediate expiry value, so that we process it again
			 * on the next iteration.
			 */
			ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0,
						  SINGLE, lcore, NULL, evtim);
			if (ret < 0) {
				EVTIM_LOG_DBG("event buffer full, failed to "
						"reset timer with immediate "
						"expiry value");
			} else {
				sw->stats.evtim_retry_count++;
				EVTIM_LOG_DBG("event buffer full, resetting "
						"rte_timer with immediate "
						"expiry value");
			}
		} else {
			sw->stats.evtim_drop_count++;
		}

	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		/* Don't free rte_timer for a periodic event timer until
		 * it is cancelled
		 */
		if (type == SINGLE)
			sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		if (type == SINGLE)
			__atomic_store_n(&evtim->state,
					 RTE_EVENT_TIMER_NOT_ARMED,
					 __ATOMIC_RELEASE);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}
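
/* Illustrative walk-through with hypothetical numbers, not derived from any
 * particular platform: with timer_tick_ns = 1000000 (1 ms) and a 2 GHz timer
 * source, cycles_per_adapter_tick = 2000000. If the service runs when
 * rte_get_timer_cycles() returns 5300000 while next_tick_cycles is 4000000,
 * the count is snapped back to the 4000000 boundary, next_tick_cycles
 * becomes 6000000, and swtim_did_tick() returns true; a later call at
 * 5900000 returns false because the next boundary has not been reached yet.
 */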

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct swtim *sw = swtim_pmd_priv(adapter);

	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
	if (tmo_nsec > sw->max_tmo_ns)
		return -1;
	if (tmo_nsec < sw->timer_tick_ns)
		return -2;

	return 0;
}

/* Check that event timer event queue sched type matches destination event
 * queue sched type
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	const uint64_t prior_enq_count = sw->stats.ev_enq_count;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	rte_event_maintain(adapter->data->event_dev_id,
			   adapter->data->event_port_id, 0);

	return prior_enq_count == sw->stats.ev_enq_count ? -EAGAIN : 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches. This avoids a scenario where
 * a user can't arm the number of timers the adapter was configured with
 * because mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}
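
/* Worked example with hypothetical numbers (RTE_MAX_LCORE = 128 is a common
 * build-time default, and RTE_MEMPOOL_CACHE_MAX_SIZE is 512): for
 * nb_requested = 10000 the adapter rounds the pool up to nb_actual = 16384,
 * leaving 6384 surplus objects. The loop above keeps doubling size while
 * 128 * size stays below 6384, so it accepts 1, 2, ... 32 and stops at 64
 * (128 * 64 = 8192), returning a per-lcore cache size of 32.
 */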

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
			adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	sw->adapter = adapter;

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
			"swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
				": err = %d", service.name, sw->service_id,
				ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
			sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_timer_data_dealloc(sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to deallocate timer data instance");
		return ret;
	}

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

static int
swtim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
			  const struct rte_event_timer *evtim,
			  uint64_t *ticks_remaining)
{
	uint64_t nsecs_per_adapter_tick, opaque, cycles_remaining;
	enum rte_event_timer_state n_state;
	double nsecs_per_cycle;
	struct rte_timer *tim;
	uint64_t cur_cycles;

	/* Check that timer is armed */
	n_state = __atomic_load_n(&evtim->state, __ATOMIC_ACQUIRE);
	if (n_state != RTE_EVENT_TIMER_ARMED)
		return -EINVAL;

	opaque = evtim->impl_opaque[0];
	tim = (struct rte_timer *)(uintptr_t)opaque;

	cur_cycles = rte_get_timer_cycles();
	if (cur_cycles > tim->expire) {
		*ticks_remaining = 0;
		return 0;
	}

	cycles_remaining = tim->expire - cur_cycles;
	nsecs_per_cycle = (double)NSECPERSEC / rte_get_timer_hz();
	nsecs_per_adapter_tick = adapter->data->conf.timer_tick_ns;

	*ticks_remaining = (uint64_t)ceil((cycles_remaining * nsecs_per_cycle) /
					  nsecs_per_adapter_tick);

	return 0;
}

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		  struct rte_event_timer **evtims,
		  uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;
	enum rte_timer_type type = SINGLE;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list of
	 * the highest lcore to insert such timers into
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap operation can prevent the race condition
	 * on in_use flag between multiple non-EAL threads.
	 */
	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
			&exp_state, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					      __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
				 __ATOMIC_RELAXED);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* update timer type for periodic adapter */
	type = get_timer_type(adapter);

	for (i = 0; i < nb_evtims; i++) {
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (unlikely(ret == -1)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		cycles = get_timeout_cycles(evtims[i], adapter);
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  type, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			__atomic_store_n(&evtims[i]->state,
					 RTE_EVENT_TIMER_ERROR,
					 __ATOMIC_RELEASE);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees the adapter specific value
		 * changes are observed before the update of state.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				__ATOMIC_RELEASE);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		/* ACQUIRE ordering guarantees the access of implementation
		 * specific opaque data under the correct state.
		 */
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void *)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * used when loading the state, to make sure the state update
		 * is observed across threads.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				__ATOMIC_RELEASE);
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
	.remaining_ticks_get = swtim_remaining_ticks_get,
};

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_u64(d, "min_resolution_ns", adapter_info.min_resolution_ns);
	rte_tel_data_add_dict_u64(d, "max_tmo_ns", adapter_info.max_tmo_ns);
	rte_tel_data_add_dict_u64(d, "event_dev_id", adapter_info.conf.event_dev_id);
	rte_tel_data_add_dict_u64(d, "socket_id", adapter_info.conf.socket_id);
	rte_tel_data_add_dict_u64(d, "clk_src", adapter_info.conf.clk_src);
	rte_tel_data_add_dict_u64(d, "timer_tick_ns", adapter_info.conf.timer_tick_ns);
	rte_tel_data_add_dict_u64(d, "nb_timers", adapter_info.conf.nb_timers);
	rte_tel_data_add_dict_u64(d, "flags", adapter_info.conf.flags);

	return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_event_timer_adapter_stats stats;
	struct rte_event_timer_adapter *adapter;
	uint16_t adapter_id;
	int ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	adapter_id = atoi(params);

	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
		return -EINVAL;
	}

	adapter = &adapters[adapter_id];

	ret = rte_event_timer_adapter_stats_get(adapter, &stats);
	if (ret < 0) {
		EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
		return ret;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
	rte_tel_data_add_dict_u64(d, "evtim_exp_count", stats.evtim_exp_count);
	rte_tel_data_add_dict_u64(d, "ev_enq_count", stats.ev_enq_count);
	rte_tel_data_add_dict_u64(d, "ev_inv_count", stats.ev_inv_count);
	rte_tel_data_add_dict_u64(d, "evtim_retry_count", stats.evtim_retry_count);
	rte_tel_data_add_dict_u64(d, "adapter_tick_count", stats.adapter_tick_count);

	return 0;
}

RTE_INIT(ta_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/ta_info",
			handle_ta_info,
			"Returns Timer adapter info. Parameter: Timer adapter id");

	rte_telemetry_register_cmd("/eventdev/ta_stats",
			handle_ta_stats,
			"Returns Timer adapter stats. Parameter: Timer adapter id");
}
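
/* Illustrative telemetry usage (assuming the standard dpdk-telemetry.py
 * client; "0" stands in for the timer adapter id parameter):
 *
 *	--> /eventdev/ta_info,0
 *	{"/eventdev/ta_info": {"timer_adapter_id": 0, "min_resolution_ns": ..., ...}}
 *	--> /eventdev/ta_stats,0
 *	{"/eventdev/ta_stats": {"timer_adapter_id": 0, "evtim_exp_count": ..., ...}}
 *
 * Both handlers above expect a single numeric adapter id; a missing or
 * non-numeric parameter yields -1 and an out-of-range id yields -EINVAL.
 */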