/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_cycles.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "rte_eventdev_trace.h"
#include "rte_event_timer_adapter.h"
#include "rte_event_timer_adapter_pmd.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER(evtim_logtype, lib.eventdev.adapter.timer, NOTICE);
RTE_LOG_REGISTER(evtim_buffer_logtype, lib.eventdev.adapter.timer, NOTICE);
RTE_LOG_REGISTER(evtim_svc_logtype, lib.eventdev.adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];

static const struct rte_event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif
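
/* Default callback used when the application does not supply its own event
 * port configuration: stop the event device if it is running, grow the
 * device configuration by one event port for the adapter's use, set that
 * port up, and restart the device if it was previously started.
 */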
static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, port_id,
						      port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}
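
	/* The memzone name encodes the adapter id (DATA_MZ_NAME_FORMAT) so
	 * that rte_event_timer_adapter_lookup() can re-attach to this
	 * adapter's shared data, including from a secondary process.
	 */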
	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf; /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
						conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}
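
/* Illustrative usage sketch (not part of the library): create an adapter on
 * top of event device 0 with a 1 ms tick driven by the CPU clock, then start
 * it.  Identifiers and sizes below are example values and error handling is
 * omitted.  For the software implementation, a service core must also be
 * mapped to the adapter's service before timers will expire (see
 * rte_event_timer_adapter_service_id_get() and swtim_start() below).
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 1000000,	// 1 ms resolution
 *		.max_tmo_ns = 10000000000,	// 10 s maximum timeout
 *		.nb_timers = 1000,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adapter =
 *		rte_event_timer_adapter_create(&conf);
 *	rte_event_timer_adapter_start(adapter);
 */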
int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}
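
/* Implementations that do not run on a service core leave service_inited at
 * zero, in which case this call reports -ESRCH; the software adapter below
 * sets it during swtim_init().
 */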
int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->data->service_inited && service_id != NULL)
		*service_id = adapter->data->service_id;

	return adapter->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}
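
/* Example of the power-of-2 indexing used by these buffer helpers: head and
 * tail increase monotonically and are only reduced to array slots with
 * "& EVENT_BUFFER_MASK".  With EVENT_BUFFER_SZ = 4096, head = 4100 maps to
 * slot 4100 & 4095 = 4, and the fill level (head - tail) stays correct across
 * unsigned wrap-around because only the difference is ever compared, never
 * the raw counters.
 */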
static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to
	 * the event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by adapter instance. May have been
	 * adjusted from what user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		uint16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}
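
/* Per-timer callback invoked from rte_timer_alt_manage() in the service
 * function.  It buffers the expiry event for enqueue to the event device and
 * queues the rte_timer object for return to the mempool; if the event buffer
 * is full, the timer is re-armed with an immediate expiry so the event is
 * retried on a later service iteration.
 */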
static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		/* If event buffer is full, put timer back in list with
		 * immediate expiry value, so that we process it again on the
		 * next iteration.
		 */
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
					  lcore, NULL, evtim);
		if (ret < 0) {
			EVTIM_LOG_DBG("event buffer full, failed to reset "
				      "timer with immediate expiry value");
		} else {
			sw->stats.evtim_retry_count++;
			EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
				      "with immediate expiry value");
		}

		if (unlikely(sw->in_use[lcore].v == 0)) {
			sw->in_use[lcore].v = 1;
			n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
						      __ATOMIC_RELAXED);
			__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
					 __ATOMIC_RELAXED);
		}
	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				 __ATOMIC_RELEASE);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}
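
/* Worked example for the conversion above (illustrative numbers): with
 * timer_tick_ns = 1000000 (1 ms) and timeout_ticks = 10, timeout_ns is
 * 10000000; assuming rte_get_timer_hz() reports 2 GHz, the result is
 * 10000000 * 2000000000 / 1E9 = 20000000 timer cycles.
 */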
/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct swtim *sw = swtim_pmd_priv(adapter);

	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
	if (tmo_nsec > sw->max_tmo_ns)
		return -1;
	if (tmo_nsec < sw->timer_tick_ns)
		return -2;

	return 0;
}

/* Check that event timer event queue sched type matches destination event
 * queue sched type
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	return 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches. This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
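/* Worked example (illustrative; assumes the default RTE_MAX_LCORE of 128):
 * nb_requested = 3000 rounds up to nb_actual = 4096, leaving 1096 spare
 * objects.  The loop below accepts size = 8 (128 * 8 = 1024 < 1096) but
 * rejects size = 16 (128 * 16 = 2048), so the per-lcore cache size is 8.
 */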
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
		 adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
				adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	sw->adapter = adapter;

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);
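
	/* Publish the service id so the application can retrieve it with
	 * rte_event_timer_adapter_service_id_get() and map a service lcore
	 * to it.
	 */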
	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}
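
/* The software adapter requires exactly one service lcore to be mapped to its
 * service before it is started (enforced in swtim_start() below).
 * Illustrative application-side setup (example lcore id 2; error handling
 * omitted):
 *
 *	uint32_t service_id;
 *	rte_event_timer_adapter_service_id_get(adapter, &service_id);
 *	rte_service_lcore_add(2);
 *	rte_service_map_lcore_set(service_id, 2, 1);
 *	rte_service_runstate_set(service_id, 1);
 *	rte_service_lcore_start(2);
 */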
static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}
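
/* Common arm path shared by swtim_arm_burst() and swtim_arm_tmo_tick_burst():
 * validate each event timer's state, timeout and destination queue, take
 * rte_timer objects from the mempool, and reset them onto the calling lcore's
 * timer list.  Returns the number of event timers successfully armed; on the
 * first failure rte_errno is set and the unused timer objects are returned to
 * the mempool.
 */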
static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		  struct rte_event_timer **evtims,
		  uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list of
	 * the highest lcore to insert such timers into
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap operation prevents a race on the in_use
	 * flag between multiple non-EAL threads.
	 */
	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
			&exp_state, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					      __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
				 __ATOMIC_RELAXED);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	for (i = 0; i < nb_evtims; i++) {
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (unlikely(ret == -1)) {
			__atomic_store_n(&evtims[i]->state,
					 RTE_EVENT_TIMER_ERROR_TOOLATE,
					 __ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			__atomic_store_n(&evtims[i]->state,
					 RTE_EVENT_TIMER_ERROR_TOOEARLY,
					 __ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			__atomic_store_n(&evtims[i]->state,
					 RTE_EVENT_TIMER_ERROR,
					 __ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		cycles = get_timeout_cycles(evtims[i], adapter);
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  SINGLE, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			__atomic_store_n(&evtims[i]->state,
					 RTE_EVENT_TIMER_ERROR,
					 __ATOMIC_RELEASE);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees that the adapter-specific value
		 * changes are observed before the update of state.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				 __ATOMIC_RELEASE);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}
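
/* Cancel a burst of event timers: stop each backing rte_timer and return it
 * to the mempool.  A timer whose callback is currently executing (or which is
 * being configured) cannot be stopped; in that case rte_errno is set to
 * EAGAIN and the loop exits early.
 */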
static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		/* ACQUIRE ordering guarantees the access of implementation
		 * specific opaque data under the correct state.
		 */
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void **)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to make sure the state update is observed between
		 * threads.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				 __ATOMIC_RELEASE);
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct rte_event_timer_adapter_ops swtim_ops = {
	.init = swtim_init,
	.uninit = swtim_uninit,
	.start = swtim_start,
	.stop = swtim_stop,
	.get_info = swtim_get_info,
	.stats_get = swtim_stats_get,
	.stats_reset = swtim_stats_reset,
	.arm_burst = swtim_arm_burst,
	.arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
	.cancel_burst = swtim_cancel_burst,
};