/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include <rte_service.h>
#include <rte_service_component.h>

#include <eal_trace_internal.h>
#include <rte_lcore.h>
#include <rte_lcore_var.h>
#include <rte_bitset.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_trace_point.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct __rte_cache_aligned rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* spin lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_spinlock_t execute_lock;

	/* API set/get-able variables */
	RTE_ATOMIC(int8_t) app_runstate;
	RTE_ATOMIC(int8_t) comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	/* Indicates how many cores the service is mapped to run on.
	 * It does not indicate the number of cores the service is running
	 * on currently.
	 */
	RTE_ATOMIC(uint32_t) num_mapped_cores;
};

struct service_stats {
	RTE_ATOMIC(uint64_t) calls;
	RTE_ATOMIC(uint64_t) idle_calls;
	RTE_ATOMIC(uint64_t) error_calls;
	RTE_ATOMIC(uint64_t) cycles;
};

/* the internal values of a service core */
struct __rte_cache_aligned core_state {
	/* map of service IDs run on this core */
	RTE_BITSET_DECLARE(mapped_services, RTE_SERVICE_NUM_MAX);
	RTE_ATOMIC(uint8_t) runstate; /* running or stopped */
	RTE_ATOMIC(uint8_t) thread_active; /* indicates when thread is in service_run() */
	uint8_t is_service_core; /* set if core is currently a service core */
	RTE_BITSET_DECLARE(service_active_on_lcore, RTE_SERVICE_NUM_MAX);
	RTE_ATOMIC(uint64_t) loops;
	RTE_ATOMIC(uint64_t) cycles;
	struct service_stats service_stats[RTE_SERVICE_NUM_MAX];
};

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static RTE_LCORE_VAR_HANDLE(struct core_state, lcore_states);
static uint32_t rte_service_library_initialized;

int32_t
rte_service_init(void)
{
	if (rte_service_library_initialized) {
		EAL_LOG(NOTICE,
			"service library init() called, init flag %d",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		EAL_LOG(ERR, "error allocating rte services array");
		goto fail_mem;
	}

	if (lcore_states == NULL)
		RTE_LCORE_VAR_ALLOC(lcore_states);

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->main_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
fail_mem:
	rte_free(rte_services);
	return -ENOMEM;
}

void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	rte_service_lcore_reset_all();
	rte_eal_mp_wait_lcore();

	rte_free(rte_services);

	rte_service_library_initialized = 0;
}

static inline bool
service_registered(uint32_t id)
{
	return rte_services[id].internal_flags & SERVICE_F_REGISTERED;
}

static inline bool
service_valid(uint32_t id)
{
	return id < RTE_SERVICE_NUM_MAX && service_registered(id);
}

static struct rte_service_spec_impl *
service_get(uint32_t id)
{
	return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {	\
	if (!service_valid(id))					\
		return retval;					\
	service = &rte_services[id];				\
} while (0)

/* Returns 1 if statistics should be collected for the service,
 * 0 otherwise.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_registered(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_registered(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	rte_eal_trace_service_component_register(free_slot, spec->name);

	return 0;
}

int32_t
rte_service_component_unregister(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	unsigned int lcore_id;
	struct core_state *cs;
	/* clear the run-bit in all cores */
	RTE_LCORE_VAR_FOREACH(lcore_id, cs, lcore_states)
		rte_bitset_clear(cs->mapped_services, id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in the
	 * service_run and service_runstate_get functions.
	 */
	if (runstate)
		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_RUNNING,
			rte_memory_order_release);
	else
		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_STOPPED,
			rte_memory_order_release);

	return 0;
}
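
/*
 * Illustrative sketch, not compiled into the EAL: how a component typically
 * uses the registration API above. The callback name, context type and
 * service name below are hypothetical.
 *
 *	static int32_t
 *	my_service_cb(void *userdata)
 *	{
 *		struct my_ctx *ctx = userdata;	// hypothetical per-service context
 *		// ... do one burst of work ...
 *		return 0;	// or -EAGAIN when there was nothing to do
 *	}
 *
 *	struct rte_service_spec spec = {
 *		.name = "my_service",
 *		.callback = my_service_cb,
 *		.callback_userdata = ctx,
 *		.capabilities = 0,	// not RTE_SERVICE_CAP_MT_SAFE
 *	};
 *	uint32_t sid;
 *	if (rte_service_component_register(&spec, &sid) == 0)
 *		rte_service_component_runstate_set(sid, 1);
 */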

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* app_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in the
	 * service_run and service_runstate_get functions.
	 */
	if (runstate)
		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_RUNNING,
			rte_memory_order_release);
	else
		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_STOPPED,
			rte_memory_order_release);

	rte_eal_trace_service_runstate_set(id, runstate);
	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING &&
	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING) {
		int check_disabled = !(s->internal_flags &
			SERVICE_F_START_CHECK);
		int lcore_mapped = (rte_atomic_load_explicit(&s->num_mapped_cores,
			rte_memory_order_relaxed) > 0);

		return (check_disabled | lcore_mapped);
	} else
		return 0;
}

static void
service_counter_add(RTE_ATOMIC(uint64_t) *counter, uint64_t operand)
{
	/* The lcore service worker thread is the only writer, and
	 * thus only a non-atomic load and an atomic store is needed,
	 * and not the more expensive atomic add.
	 */
	uint64_t value;

	value = rte_atomic_load_explicit(counter, rte_memory_order_relaxed);

	rte_atomic_store_explicit(counter, value + operand,
		rte_memory_order_relaxed);
}

static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
		struct core_state *cs, uint32_t service_idx)
{
	rte_eal_trace_service_run_begin(service_idx, rte_lcore_id());
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		int rc = s->spec.callback(userdata);

		struct service_stats *service_stats =
			&cs->service_stats[service_idx];

		service_counter_add(&service_stats->calls, 1);

		if (rc == -EAGAIN)
			service_counter_add(&service_stats->idle_calls, 1);
		else if (rc != 0)
			service_counter_add(&service_stats->error_calls, 1);

		if (likely(rc != -EAGAIN)) {
			uint64_t end = rte_rdtsc();
			uint64_t cycles = end - start;

			service_counter_add(&cs->cycles, cycles);
			service_counter_add(&service_stats->cycles, cycles);
		}
	} else {
		s->spec.callback(userdata);
	}
	rte_eal_trace_service_run_end(service_idx, rte_lcore_id());
}

/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, const uint64_t *mapped_services,
		struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) !=
			RUNSTATE_RUNNING ||
	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) !=
			RUNSTATE_RUNNING ||
	    !rte_bitset_test(mapped_services, i)) {
		rte_bitset_clear(cs->service_active_on_lcore, i);
		return -ENOEXEC;
	}

	rte_bitset_set(cs->service_active_on_lcore, i);

	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
		if (!rte_spinlock_trylock(&s->execute_lock))
			return -EBUSY;

		service_runner_do_callback(s, cs, i);
		rte_spinlock_unlock(&s->execute_lock);
	} else
		service_runner_do_callback(s, cs, i);

	return 0;
}

int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (!service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		struct core_state *cs =
			RTE_LCORE_VAR_LCORE(ids[i], lcore_states);

		if (rte_bitset_test(cs->service_active_on_lcore, id))
			return 1;
	}

	return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	struct core_state *cs = RTE_LCORE_VAR(lcore_states);
	struct rte_service_spec_impl *s;

	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* Increment num_mapped_cores to reflect that this core is
	 * now mapped, and thus capable of running the service.
	 */
	rte_atomic_fetch_add_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);

	RTE_BITSET_DECLARE(all_services, RTE_SERVICE_NUM_MAX);
	rte_bitset_set_all(all_services, RTE_SERVICE_NUM_MAX);
	int ret = service_run(id, cs, all_services, s, serialize_mt_unsafe);

	rte_atomic_fetch_sub_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);

	return ret;
}
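
/*
 * Illustrative sketch, not compiled into the EAL: an application lcore that
 * is not a service core can drive a service from its own loop via the call
 * above. The service name is hypothetical.
 *
 *	uint32_t sid;
 *	if (rte_service_get_by_name("my_service", &sid) == 0) {
 *		// serialize_mt_unsafe = 1: contend for the execute_lock of
 *		// services lacking RTE_SERVICE_CAP_MT_SAFE
 *		if (rte_service_run_iter_on_app_lcore(sid, 1) == -EBUSY) {
 *			// another lcore is running the service right now
 *		}
 *	}
 */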

static int32_t
service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	struct core_state *cs = RTE_LCORE_VAR(lcore_states);

	rte_atomic_store_explicit(&cs->thread_active, 1, rte_memory_order_seq_cst);

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	while (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING) {
		ssize_t id;

		RTE_BITSET_FOREACH_SET(id, cs->mapped_services, RTE_SERVICE_NUM_MAX) {
			/* return value ignored as no change to code flow */
			service_run(id, cs, cs->mapped_services, service_get(id), 1);
		}

		rte_atomic_store_explicit(&cs->loops, cs->loops + 1, rte_memory_order_relaxed);
	}

	/* Switch off this core for all services, to ensure that future
	 * calls to may_be_active() know this core is switched off.
	 */
	rte_bitset_clear_all(cs->service_active_on_lcore, RTE_SERVICE_NUM_MAX);

	/* Use SEQ CST memory ordering to avoid any re-ordering around
	 * this store, ensuring that once this store is visible, the service
	 * lcore thread really is done in service cores code.
	 */
	rte_atomic_store_explicit(&cs->thread_active, 0, rte_memory_order_seq_cst);
	return 0;
}

int32_t
rte_service_lcore_may_be_active(uint32_t lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	if (lcore >= RTE_MAX_LCORE || !cs->is_service_core)
		return -EINVAL;

	/* Load thread_active using ACQUIRE to avoid instructions dependent on
	 * the result being re-ordered before this load completes.
	 */
	return rte_atomic_load_explicit(&cs->thread_active,
			rte_memory_order_acquire);
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;

	unsigned int lcore_id;
	struct core_state *cs;
	RTE_LCORE_VAR_FOREACH(lcore_id, cs, lcore_states)
		count += cs->is_service_core;

	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs =
			RTE_LCORE_VAR_LCORE(i, lcore_states);
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
	if (!cs->is_service_core)
		return -ENOTSUP;

	return rte_bitset_count_set(cs->mapped_services, RTE_SERVICE_NUM_MAX);
}

int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * should multiplex to a single core, or 1:1 if there are the
		 * same amount of services as service-cores
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
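
/*
 * Illustrative sketch, not compiled into the EAL: the simplest
 * application-side setup, relying on the defaults created above. Assumes
 * the EAL was started with one or more service lcores (via the service
 * coremask/corelist EAL options).
 *
 *	if (rte_service_start_with_defaults() != 0)
 *		rte_exit(EXIT_FAILURE, "failed to start service cores\n");
 */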

static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	/* validate ID, or return error value */
	if (!service_valid(sid) || lcore >= RTE_MAX_LCORE ||
			!cs->is_service_core)
		return -EINVAL;

	if (set) {
		bool lcore_mapped = rte_bitset_test(cs->mapped_services, sid);

		if (*set && !lcore_mapped) {
			rte_bitset_set(cs->mapped_services, sid);
			rte_atomic_fetch_add_explicit(&rte_services[sid].num_mapped_cores,
				1, rte_memory_order_relaxed);
		}
		if (!*set && lcore_mapped) {
			rte_bitset_clear(cs->mapped_services, sid);
			rte_atomic_fetch_sub_explicit(&rte_services[sid].num_mapped_cores,
				1, rte_memory_order_relaxed);
		}
	}

	if (enabled)
		*enabled = rte_bitset_test(cs->mapped_services, sid);

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	uint32_t on = enabled > 0;
	rte_eal_trace_service_map_lcore(id, lcore, enabled);
	return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	uint32_t enabled;
	int ret = service_update(id, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	cs->is_service_core = (state == ROLE_SERVICE);

	rte_eal_trace_service_lcore_state_change(lcore, state);
}

int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all mapped services */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = RTE_LCORE_VAR_LCORE(i, lcore_states);

		if (cs->is_service_core) {
			rte_bitset_clear_all(cs->mapped_services, RTE_SERVICE_NUM_MAX);
			set_lcore_state(i, ROLE_RTE);
			/* runstate acts as the guard variable. Use
			 * store-release memory order here to synchronize
			 * with load-acquire in runstate read functions.
			 */
			rte_atomic_store_explicit(&cs->runstate,
				RUNSTATE_STOPPED, rte_memory_order_release);
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_atomic_store_explicit(&rte_services[i].num_mapped_cores, 0,
			rte_memory_order_relaxed);

	return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
	if (cs->is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	rte_bitset_clear_all(cs->mapped_services, RTE_SERVICE_NUM_MAX);
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_STOPPED,
		rte_memory_order_release);

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) !=
			RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	rte_smp_wmb();
	return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_RUNNING, rte_memory_order_release);

	rte_eal_trace_service_lcore_start(lcore);

	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
			RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		bool enabled = rte_bitset_test(cs->mapped_services, i);
		bool service_running = rte_service_runstate_get(i);
		bool only_core = (1 ==
			rte_atomic_load_explicit(&rte_services[i].num_mapped_cores,
				rte_memory_order_relaxed));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_STOPPED,
		rte_memory_order_release);

	rte_eal_trace_service_lcore_stop(lcore);

	return 0;
}
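
/*
 * Illustrative sketch, not compiled into the EAL: the explicit service-lcore
 * lifecycle, as an alternative to rte_service_start_with_defaults(). "sid"
 * and "lcore_id" are assumed to be a registered service and a spare lcore.
 *
 *	rte_service_lcore_add(lcore_id);
 *	rte_service_map_lcore_set(sid, lcore_id, 1);
 *	rte_service_runstate_set(sid, 1);
 *	rte_service_lcore_start(lcore_id);
 *
 *	// ... later, tear down in reverse order; stopping the service first
 *	// avoids the -EBUSY check in rte_service_lcore_stop() ...
 *	rte_service_runstate_set(sid, 0);
 *	rte_service_lcore_stop(lcore_id);
 *	rte_service_lcore_del(lcore_id);
 */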

static uint64_t
lcore_attr_get_loops(unsigned int lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	return rte_atomic_load_explicit(&cs->loops, rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_cycles(unsigned int lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	return rte_atomic_load_explicit(&cs->cycles, rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_service_calls(uint32_t service_id, unsigned int lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	return rte_atomic_load_explicit(&cs->service_stats[service_id].calls,
		rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_service_idle_calls(uint32_t service_id, unsigned int lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	return rte_atomic_load_explicit(&cs->service_stats[service_id].idle_calls,
		rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_service_error_calls(uint32_t service_id, unsigned int lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	return rte_atomic_load_explicit(&cs->service_stats[service_id].error_calls,
		rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_service_cycles(uint32_t service_id, unsigned int lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	return rte_atomic_load_explicit(&cs->service_stats[service_id].cycles,
		rte_memory_order_relaxed);
}

typedef uint64_t (*lcore_attr_get_fun)(uint32_t service_id,
		unsigned int lcore);

static uint64_t
attr_get(uint32_t id, lcore_attr_get_fun lcore_attr_get)
{
	unsigned int lcore;
	uint64_t sum = 0;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		struct core_state *cs =
			RTE_LCORE_VAR_LCORE(lcore, lcore_states);

		if (cs->is_service_core)
			sum += lcore_attr_get(id, lcore);
	}

	return sum;
}

static uint64_t
attr_get_service_calls(uint32_t service_id)
{
	return attr_get(service_id, lcore_attr_get_service_calls);
}

static uint64_t
attr_get_service_idle_calls(uint32_t service_id)
{
	return attr_get(service_id, lcore_attr_get_service_idle_calls);
}

static uint64_t
attr_get_service_error_calls(uint32_t service_id)
{
	return attr_get(service_id, lcore_attr_get_service_error_calls);
}

static uint64_t
attr_get_service_cycles(uint32_t service_id)
{
	return attr_get(service_id, lcore_attr_get_service_cycles);
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	if (!service_valid(id))
		return -EINVAL;

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = attr_get_service_calls(id);
		return 0;
	case RTE_SERVICE_ATTR_IDLE_CALL_COUNT:
		*attr_value = attr_get_service_idle_calls(id);
		return 0;
	case RTE_SERVICE_ATTR_ERROR_CALL_COUNT:
		*attr_value = attr_get_service_error_calls(id);
		return 0;
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = attr_get_service_cycles(id);
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
		uint64_t *attr_value)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = lcore_attr_get_loops(lcore);
		return 0;
	case RTE_SERVICE_LCORE_ATTR_CYCLES:
		*attr_value = lcore_attr_get_cycles(lcore);
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
	unsigned int lcore;

	if (!service_valid(id))
		return -EINVAL;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		struct core_state *cs =
			RTE_LCORE_VAR_LCORE(lcore, lcore_states);

		cs->service_stats[id] = (struct service_stats) {};
	}

	return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}

static void
service_dump_one(FILE *f, uint32_t id)
{
	struct rte_service_spec_impl *s;
	uint64_t service_calls;
	uint64_t service_cycles;

	service_calls = attr_get_service_calls(id);
	service_cycles = attr_get_service_cycles(id);

	/* avoid divide by zero */
	if (service_calls == 0)
		service_calls = 1;

	s = service_get(id);

	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), service_calls,
			service_cycles, service_cycles / service_calls);
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
{
	uint32_t i;
	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_registered(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->service_stats[i].calls);
	}
	fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		service_dump_one(f, id);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_registered(i))
			continue;
		service_dump_one(f, i);
	}
fprintf(f, "Service Cores Summary\n"); 109699a2dd95SBruce Richardson for (i = 0; i < RTE_MAX_LCORE; i++) { 109799a2dd95SBruce Richardson if (lcore_config[i].core_role != ROLE_SERVICE) 109899a2dd95SBruce Richardson continue; 109999a2dd95SBruce Richardson 110099a2dd95SBruce Richardson service_dump_calls_per_lcore(f, i); 110199a2dd95SBruce Richardson } 110299a2dd95SBruce Richardson 110399a2dd95SBruce Richardson return 0; 110499a2dd95SBruce Richardson } 1105