xref: /dpdk/lib/eal/common/rte_service.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <inttypes.h>
7 #include <string.h>
8 
9 #include <rte_service.h>
10 #include <rte_service_component.h>
11 
12 #include <eal_trace_internal.h>
13 #include <rte_lcore.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17 #include <rte_atomic.h>
18 #include <rte_malloc.h>
19 #include <rte_spinlock.h>
20 #include <rte_trace_point.h>
21 
22 #include "eal_private.h"
23 
24 #define RTE_SERVICE_NUM_MAX 64
25 
26 #define SERVICE_F_REGISTERED    (1 << 0)
27 #define SERVICE_F_STATS_ENABLED (1 << 1)
28 #define SERVICE_F_START_CHECK   (1 << 2)
29 
30 /* runstates for services and lcores, denoting if they are active or not */
31 #define RUNSTATE_STOPPED 0
32 #define RUNSTATE_RUNNING 1
33 
34 /* internal representation of a service */
35 struct __rte_cache_aligned rte_service_spec_impl {
36 	/* public part of the struct */
37 	struct rte_service_spec spec;
38 
39 	/* Spin lock that, when held, indicates a service core is currently
40 	 * running this service callback. When not held, a core may take the
41 	 * lock and then run the service callback.
42 	 */
43 	rte_spinlock_t execute_lock;
44 
45 	/* API set/get-able variables */
46 	RTE_ATOMIC(int8_t) app_runstate;
47 	RTE_ATOMIC(int8_t) comp_runstate;
48 	uint8_t internal_flags;
49 
50 	/* Indicates how many cores the service is mapped to run on.
52 	 * It does not indicate the number of cores the service is running
53 	 * on currently.
54 	 */
55 	RTE_ATOMIC(uint32_t) num_mapped_cores;
56 };
57 
58 struct service_stats {
59 	RTE_ATOMIC(uint64_t) calls;
60 	RTE_ATOMIC(uint64_t) idle_calls;
61 	RTE_ATOMIC(uint64_t) error_calls;
62 	RTE_ATOMIC(uint64_t) cycles;
63 };
64 
65 /* the internal values of a service core */
66 struct __rte_cache_aligned core_state {
67 	/* bitmap of the service IDs mapped to run on this core */
68 	uint64_t service_mask;
69 	RTE_ATOMIC(uint8_t) runstate; /* running or stopped */
70 	RTE_ATOMIC(uint8_t) thread_active; /* indicates when thread is in service_run() */
71 	uint8_t is_service_core; /* set if core is currently a service core */
72 	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
73 	RTE_ATOMIC(uint64_t) loops;
74 	RTE_ATOMIC(uint64_t) cycles;
75 	struct service_stats service_stats[RTE_SERVICE_NUM_MAX];
76 };
77 
78 static uint32_t rte_service_count;
79 static struct rte_service_spec_impl *rte_services;
80 static struct core_state *lcore_states;
81 static uint32_t rte_service_library_initialized;
82 
83 int32_t
84 rte_service_init(void)
85 {
86 	/* Hard limit due to the use of a uint64_t-based bitmask (and the
87 	 * 64-bit clz/ctz intrinsics).
88 	 */
89 	RTE_BUILD_BUG_ON(RTE_SERVICE_NUM_MAX > 64);
90 
91 	if (rte_service_library_initialized) {
92 		EAL_LOG(NOTICE,
93 			"service library init() called, init flag %d",
94 			rte_service_library_initialized);
95 		return -EALREADY;
96 	}
97 
98 	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
99 			sizeof(struct rte_service_spec_impl),
100 			RTE_CACHE_LINE_SIZE);
101 	if (!rte_services) {
102 		EAL_LOG(ERR, "error allocating rte services array");
103 		goto fail_mem;
104 	}
105 
106 	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
107 			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
108 	if (!lcore_states) {
109 		EAL_LOG(ERR, "error allocating core states array");
110 		goto fail_mem;
111 	}
112 
113 	int i;
114 	struct rte_config *cfg = rte_eal_get_configuration();
115 	for (i = 0; i < RTE_MAX_LCORE; i++) {
116 		if (lcore_config[i].core_role == ROLE_SERVICE) {
117 			if ((unsigned int)i == cfg->main_lcore)
118 				continue;
119 			rte_service_lcore_add(i);
120 		}
121 	}
122 
123 	rte_service_library_initialized = 1;
124 	return 0;
125 fail_mem:
126 	rte_free(rte_services);
127 	rte_free(lcore_states);
128 	return -ENOMEM;
129 }
130 
131 void
132 rte_service_finalize(void)
133 {
134 	if (!rte_service_library_initialized)
135 		return;
136 
137 	rte_service_lcore_reset_all();
138 	rte_eal_mp_wait_lcore();
139 
140 	rte_free(rte_services);
141 	rte_free(lcore_states);
142 
143 	rte_service_library_initialized = 0;
144 }
145 
146 static inline bool
147 service_registered(uint32_t id)
148 {
149 	return rte_services[id].internal_flags & SERVICE_F_REGISTERED;
150 }
151 
152 static inline bool
153 service_valid(uint32_t id)
154 {
155 	return id < RTE_SERVICE_NUM_MAX && service_registered(id);
156 }
157 
158 static struct rte_service_spec_impl *
159 service_get(uint32_t id)
160 {
161 	return &rte_services[id];
162 }
163 
164 /* validate ID and retrieve service pointer, or return error value */
165 #define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
166 	if (!service_valid(id))                                         \
167 		return retval;                                          \
168 	service = &rte_services[id];                                    \
169 } while (0)
170 
171 /* Returns 1 if statistics should be collected for the service,
172  * and 0 if they should not.
173  */
174 static inline int
175 service_stats_enabled(struct rte_service_spec_impl *impl)
176 {
177 	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
178 }
179 
180 static inline int
181 service_mt_safe(struct rte_service_spec_impl *s)
182 {
183 	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
184 }
185 
186 int32_t
187 rte_service_set_stats_enable(uint32_t id, int32_t enabled)
188 {
189 	struct rte_service_spec_impl *s;
190 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
191 
192 	if (enabled)
193 		s->internal_flags |= SERVICE_F_STATS_ENABLED;
194 	else
195 		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
196 
197 	return 0;
198 }
199 
200 int32_t
201 rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
202 {
203 	struct rte_service_spec_impl *s;
204 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
205 
206 	if (enabled)
207 		s->internal_flags |= SERVICE_F_START_CHECK;
208 	else
209 		s->internal_flags &= ~(SERVICE_F_START_CHECK);
210 
211 	return 0;
212 }
213 
214 uint32_t
215 rte_service_get_count(void)
216 {
217 	return rte_service_count;
218 }
219 
220 int32_t
221 rte_service_get_by_name(const char *name, uint32_t *service_id)
222 {
223 	if (!service_id)
224 		return -EINVAL;
225 
226 	int i;
227 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
228 		if (service_registered(i) &&
229 				strcmp(name, rte_services[i].spec.name) == 0) {
230 			*service_id = i;
231 			return 0;
232 		}
233 	}
234 
235 	return -ENODEV;
236 }
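
/*
 * Usage sketch (illustrative, not part of the original file): an
 * application looking up a registered service by name and enabling it.
 * The service name "example_service" is hypothetical.
 *
 *	uint32_t id;
 *
 *	if (rte_service_get_by_name("example_service", &id) == 0)
 *		rte_service_runstate_set(id, 1);
 */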
237 
238 const char *
239 rte_service_get_name(uint32_t id)
240 {
241 	struct rte_service_spec_impl *s;
242 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
243 	return s->spec.name;
244 }
245 
246 int32_t
247 rte_service_probe_capability(uint32_t id, uint32_t capability)
248 {
249 	struct rte_service_spec_impl *s;
250 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
251 	return !!(s->spec.capabilities & capability);
252 }
253 
254 int32_t
255 rte_service_component_register(const struct rte_service_spec *spec,
256 			       uint32_t *id_ptr)
257 {
258 	uint32_t i;
259 	int32_t free_slot = -1;
260 
261 	if (spec->callback == NULL || strlen(spec->name) == 0)
262 		return -EINVAL;
263 
264 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
265 		if (!service_registered(i)) {
266 			free_slot = i;
267 			break;
268 		}
269 	}
270 
271 	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
272 		return -ENOSPC;
273 
274 	struct rte_service_spec_impl *s = &rte_services[free_slot];
275 	s->spec = *spec;
276 	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
277 
278 	rte_service_count++;
279 
280 	if (id_ptr)
281 		*id_ptr = free_slot;
282 
283 	rte_eal_trace_service_component_register(free_slot, spec->name);
284 
285 	return 0;
286 }
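
/*
 * Registration sketch (illustrative, with hypothetical names): how a
 * component might register a service with the API above and then mark
 * the component side as ready to run.
 *
 *	static int32_t
 *	example_cb(void *userdata)
 *	{
 *		// poll hardware, process a burst of work, etc.
 *		return 0;
 *	}
 *
 *	struct rte_service_spec spec = {
 *		.name = "example_component",
 *		.callback = example_cb,
 *		.callback_userdata = NULL,
 *		.capabilities = 0,
 *	};
 *	uint32_t id;
 *
 *	if (rte_service_component_register(&spec, &id) == 0)
 *		rte_service_component_runstate_set(id, 1);
 */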
287 
288 int32_t
289 rte_service_component_unregister(uint32_t id)
290 {
291 	uint32_t i;
292 	struct rte_service_spec_impl *s;
293 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
294 
295 	rte_service_count--;
296 
297 	s->internal_flags &= ~(SERVICE_F_REGISTERED);
298 
299 	/* clear this service's bit in every lcore's service mask */
300 	for (i = 0; i < RTE_MAX_LCORE; i++)
301 		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);
302 
303 	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));
304 
305 	return 0;
306 }
307 
308 int32_t
309 rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
310 {
311 	struct rte_service_spec_impl *s;
312 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
313 
314 	/* comp_runstate acts as the guard variable. Use store-release
315 	 * memory order. This synchronizes with the load-acquire in
316 	 * service_run() and rte_service_runstate_get().
317 	 */
318 	if (runstate)
319 		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_RUNNING,
320 			rte_memory_order_release);
321 	else
322 		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_STOPPED,
323 			rte_memory_order_release);
324 
325 	return 0;
326 }
327 
328 int32_t
329 rte_service_runstate_set(uint32_t id, uint32_t runstate)
330 {
331 	struct rte_service_spec_impl *s;
332 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
333 
334 	/* app_runstate acts as the guard variable. Use store-release
335 	 * memory order. This synchronizes with the load-acquire in
336 	 * service_run() and rte_service_runstate_get().
337 	 */
338 	if (runstate)
339 		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_RUNNING,
340 			rte_memory_order_release);
341 	else
342 		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_STOPPED,
343 			rte_memory_order_release);
344 
345 	rte_eal_trace_service_runstate_set(id, runstate);
346 	return 0;
347 }
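
/*
 * The component runstate (rte_service_component_runstate_set) and the
 * application runstate (set above) are combined by service_run() and
 * rte_service_runstate_get(): the callback only executes once both are
 * RUNSTATE_RUNNING. Illustrative two-step enable, with a hypothetical id:
 *
 *	rte_service_component_runstate_set(id, 1);	// component ready
 *	rte_service_runstate_set(id, 1);		// application enables
 */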
348 
349 int32_t
350 rte_service_runstate_get(uint32_t id)
351 {
352 	struct rte_service_spec_impl *s;
353 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
354 
355 	/* comp_runstate and app_runstate act as the guard variables.
356 	 * Use load-acquire memory order. This synchronizes with
357 	 * store-release in service state set functions.
358 	 */
359 	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) ==
360 			RUNSTATE_RUNNING &&
361 	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) ==
362 			RUNSTATE_RUNNING) {
363 		int check_disabled = !(s->internal_flags &
364 			SERVICE_F_START_CHECK);
365 		int lcore_mapped = (rte_atomic_load_explicit(&s->num_mapped_cores,
366 			rte_memory_order_relaxed) > 0);
367 
368 		return (check_disabled | lcore_mapped);
369 	} else
370 		return 0;
371 
372 }
373 
374 static void
375 service_counter_add(RTE_ATOMIC(uint64_t) *counter, uint64_t operand)
376 {
377 	/* The lcore service worker thread is the only writer, and
378 	 * thus only a non-atomic load and an atomic store are needed,
379 	 * and not the more expensive atomic add.
380 	 */
381 	uint64_t value;
382 
383 	value = rte_atomic_load_explicit(counter, rte_memory_order_relaxed);
384 
385 	rte_atomic_store_explicit(counter, value + operand,
386 				  rte_memory_order_relaxed);
387 }
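
/*
 * The relaxed load plus relaxed store above is a single-writer
 * optimization: for the owning lcore it is equivalent to a plain
 * "*counter += operand", while the atomic store keeps the 64-bit reads in
 * the attribute getters below free of torn values. A read-modify-write
 * such as
 *
 *	rte_atomic_fetch_add_explicit(counter, operand,
 *				      rte_memory_order_relaxed);
 *
 * would also be correct, but is needlessly expensive when only one thread
 * ever writes the counter.
 */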
388 
389 static inline void
390 service_runner_do_callback(struct rte_service_spec_impl *s,
391 			   struct core_state *cs, uint32_t service_idx)
392 {
393 	rte_eal_trace_service_run_begin(service_idx, rte_lcore_id());
394 	void *userdata = s->spec.callback_userdata;
395 
396 	if (service_stats_enabled(s)) {
397 		uint64_t start = rte_rdtsc();
398 		int rc = s->spec.callback(userdata);
399 
400 		struct service_stats *service_stats =
401 			&cs->service_stats[service_idx];
402 
403 		service_counter_add(&service_stats->calls, 1);
404 
405 		if (rc == -EAGAIN)
406 			service_counter_add(&service_stats->idle_calls, 1);
407 		else if (rc != 0)
408 			service_counter_add(&service_stats->error_calls, 1);
409 
410 		if (likely(rc != -EAGAIN)) {
411 			uint64_t end = rte_rdtsc();
412 			uint64_t cycles = end - start;
413 
414 			service_counter_add(&cs->cycles, cycles);
415 			service_counter_add(&service_stats->cycles, cycles);
416 		}
417 	} else {
418 		s->spec.callback(userdata);
419 	}
420 	rte_eal_trace_service_run_end(service_idx, rte_lcore_id());
421 }
422 
423 
424 /* Expects the service 's' to be valid. */
425 static int32_t
426 service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
427 	    struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
428 {
429 	if (!s)
430 		return -EINVAL;
431 
432 	/* comp_runstate and app_runstate act as the guard variables.
433 	 * Use load-acquire memory order. This synchronizes with
434 	 * store-release in service state set functions.
435 	 */
436 	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) !=
437 			RUNSTATE_RUNNING ||
438 	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) !=
439 			RUNSTATE_RUNNING ||
440 	    !(service_mask & (UINT64_C(1) << i))) {
441 		cs->service_active_on_lcore[i] = 0;
442 		return -ENOEXEC;
443 	}
444 
445 	cs->service_active_on_lcore[i] = 1;
446 
447 	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
448 		if (!rte_spinlock_trylock(&s->execute_lock))
449 			return -EBUSY;
450 
451 		service_runner_do_callback(s, cs, i);
452 		rte_spinlock_unlock(&s->execute_lock);
453 	} else
454 		service_runner_do_callback(s, cs, i);
455 
456 	return 0;
457 }
458 
459 int32_t
460 rte_service_may_be_active(uint32_t id)
461 {
462 	uint32_t ids[RTE_MAX_LCORE] = {0};
463 	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
464 	int i;
465 
466 	if (!service_valid(id))
467 		return -EINVAL;
468 
469 	for (i = 0; i < lcore_count; i++) {
470 		if (lcore_states[ids[i]].service_active_on_lcore[id])
471 			return 1;
472 	}
473 
474 	return 0;
475 }
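
/*
 * Quiescence sketch (illustrative, hypothetical id): after disabling a
 * service, an application can poll rte_service_may_be_active() before
 * freeing any resources the callback might still be using.
 *
 *	rte_service_runstate_set(id, 0);
 *	while (rte_service_may_be_active(id))
 *		rte_pause();
 *	// now safe to free resources used by the service callback
 */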
476 
477 int32_t
478 rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
479 {
480 	struct core_state *cs = &lcore_states[rte_lcore_id()];
481 	struct rte_service_spec_impl *s;
482 
483 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
484 
485 	/* Increment num_mapped_cores to reflect that this core is
486 	 * now mapped and capable of running the service.
487 	 */
488 	rte_atomic_fetch_add_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);
489 
490 	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);
491 
492 	rte_atomic_fetch_sub_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);
493 
494 	return ret;
495 }
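
/*
 * Usage sketch (illustrative, hypothetical id): an application lcore
 * running one iteration of a service itself. With serialize_mt_unsafe set
 * to 1, the execute_lock serializes this call against service lcores that
 * have the same MT-unsafe service mapped; -EBUSY means another lcore held
 * the lock at that moment.
 *
 *	int ret = rte_service_run_iter_on_app_lcore(id, 1);
 *	if (ret == -EBUSY)
 *		; // try again later, another lcore is running the service
 */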
496 
497 static int32_t
498 service_runner_func(void *arg)
499 {
500 	RTE_SET_USED(arg);
501 	uint8_t i;
502 	const int lcore = rte_lcore_id();
503 	struct core_state *cs = &lcore_states[lcore];
504 
505 	rte_atomic_store_explicit(&cs->thread_active, 1, rte_memory_order_seq_cst);
506 
507 	/* runstate acts as the guard variable. Use load-acquire
508 	 * memory order here to synchronize with store-release
509 	 * in runstate update functions.
510 	 */
511 	while (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
512 			RUNSTATE_RUNNING) {
513 
514 		const uint64_t service_mask = cs->service_mask;
515 		uint8_t start_id;
516 		uint8_t end_id;
517 
518 		if (service_mask == 0)
519 			continue;
520 
521 		start_id = rte_ctz64(service_mask);
522 		end_id = 64 - rte_clz64(service_mask);
523 
524 		for (i = start_id; i < end_id; i++) {
525 			/* return value ignored as no change to code flow */
526 			service_run(i, cs, service_mask, service_get(i), 1);
527 		}
528 
529 		rte_atomic_store_explicit(&cs->loops, cs->loops + 1, rte_memory_order_relaxed);
530 	}
531 
532 	/* Switch off this core for all services, to ensure that future
533 	 * calls to may_be_active() know this core is switched off.
534 	 */
535 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
536 		cs->service_active_on_lcore[i] = 0;
537 
538 	/* Use SEQ CST memory ordering to avoid any re-ordering around
539 	 * this store, ensuring that once this store is visible, the service
540 	 * lcore thread has really left the service cores code.
541 	 */
542 	rte_atomic_store_explicit(&cs->thread_active, 0, rte_memory_order_seq_cst);
543 	return 0;
544 }
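
/*
 * The ctz/clz pair above bounds the scan to the populated part of the
 * service mask. For example, with service_mask = 0x34 (services 2, 4 and
 * 5 mapped), rte_ctz64() gives start_id = 2 and 64 - rte_clz64() gives
 * end_id = 6, so only indices 2..5 are probed; unmapped indices inside
 * that window are rejected by the mask check in service_run().
 */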
545 
546 int32_t
547 rte_service_lcore_may_be_active(uint32_t lcore)
548 {
549 	if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
550 		return -EINVAL;
551 
552 	/* Load thread_active using ACQUIRE to avoid instructions dependent on
553 	 * the result being re-ordered before this load completes.
554 	 */
555 	return rte_atomic_load_explicit(&lcore_states[lcore].thread_active,
556 			       rte_memory_order_acquire);
557 }
558 
559 int32_t
560 rte_service_lcore_count(void)
561 {
562 	int32_t count = 0;
563 	uint32_t i;
564 	for (i = 0; i < RTE_MAX_LCORE; i++)
565 		count += lcore_states[i].is_service_core;
566 	return count;
567 }
568 
569 int32_t
570 rte_service_lcore_list(uint32_t array[], uint32_t n)
571 {
572 	uint32_t count = rte_service_lcore_count();
573 	if (count > n)
574 		return -ENOMEM;
575 
576 	if (!array)
577 		return -EINVAL;
578 
579 	uint32_t i;
580 	uint32_t idx = 0;
581 	for (i = 0; i < RTE_MAX_LCORE; i++) {
582 		struct core_state *cs = &lcore_states[i];
583 		if (cs->is_service_core) {
584 			array[idx] = i;
585 			idx++;
586 		}
587 	}
588 
589 	return count;
590 }
591 
592 int32_t
593 rte_service_lcore_count_services(uint32_t lcore)
594 {
595 	if (lcore >= RTE_MAX_LCORE)
596 		return -EINVAL;
597 
598 	struct core_state *cs = &lcore_states[lcore];
599 	if (!cs->is_service_core)
600 		return -ENOTSUP;
601 
602 	return rte_popcount64(cs->service_mask);
603 }
604 
605 int32_t
606 rte_service_start_with_defaults(void)
607 {
608 	/* create a default mapping from cores to services, then start the
609 	 * services to make them transparent to unaware applications.
610 	 */
611 	uint32_t i;
612 	int ret;
613 	uint32_t count = rte_service_get_count();
614 
615 	int32_t lcore_iter = 0;
616 	uint32_t ids[RTE_MAX_LCORE] = {0};
617 	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
618 
619 	if (lcore_count == 0)
620 		return -ENOTSUP;
621 
622 	for (i = 0; (int)i < lcore_count; i++)
623 		rte_service_lcore_start(ids[i]);
624 
625 	for (i = 0; i < count; i++) {
626 		/* Do 1:1 core mapping here, with each service assigned a
627 		 * single core by default. Multiple services are multiplexed
628 		 * onto a single core, or mapped 1:1 if there are as many
629 		 * service cores as services.
630 		 */
631 		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
632 		if (ret)
633 			return -ENODEV;
634 
635 		lcore_iter++;
636 		if (lcore_iter >= lcore_count)
637 			lcore_iter = 0;
638 
639 		ret = rte_service_runstate_set(i, 1);
640 		if (ret)
641 			return -ENOEXEC;
642 	}
643 
644 	return 0;
645 }
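
/*
 * Startup sketch (illustrative): the simplest way for an application to
 * get all registered services running, assuming lcore 1 is free to be
 * donated as a service core (the lcore id is hypothetical).
 *
 *	rte_service_lcore_add(1);
 *	if (rte_service_start_with_defaults() != 0)
 *		rte_exit(EXIT_FAILURE, "failed to start services\n");
 */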
646 
647 static int32_t
648 service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
649 {
650 	/* validate ID, or return error value */
651 	if (!service_valid(sid) || lcore >= RTE_MAX_LCORE ||
652 			!lcore_states[lcore].is_service_core)
653 		return -EINVAL;
654 
655 	uint64_t sid_mask = UINT64_C(1) << sid;
656 	if (set) {
657 		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
658 			sid_mask;
659 
660 		if (*set && !lcore_mapped) {
661 			lcore_states[lcore].service_mask |= sid_mask;
662 			rte_atomic_fetch_add_explicit(&rte_services[sid].num_mapped_cores,
663 				1, rte_memory_order_relaxed);
664 		}
665 		if (!*set && lcore_mapped) {
666 			lcore_states[lcore].service_mask &= ~(sid_mask);
667 			rte_atomic_fetch_sub_explicit(&rte_services[sid].num_mapped_cores,
668 				1, rte_memory_order_relaxed);
669 		}
670 	}
671 
672 	if (enabled)
673 		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));
674 
675 	return 0;
676 }
677 
678 int32_t
679 rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
680 {
681 	uint32_t on = enabled > 0;
682 	rte_eal_trace_service_map_lcore(id, lcore, enabled);
683 	return service_update(id, lcore, &on, 0);
684 }
685 
686 int32_t
687 rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
688 {
689 	uint32_t enabled;
690 	int ret = service_update(id, lcore, 0, &enabled);
691 	if (ret == 0)
692 		return enabled;
693 	return ret;
694 }
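
/*
 * Mapping sketch (illustrative, hypothetical ids): pinning a service to a
 * specific service lcore instead of relying on
 * rte_service_start_with_defaults().
 *
 *	rte_service_map_lcore_set(service_id, lcore_id, 1);
 *	rte_service_runstate_set(service_id, 1);
 *	rte_service_lcore_start(lcore_id);
 */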
695 
696 static void
697 set_lcore_state(uint32_t lcore, int32_t state)
698 {
699 	/* mark core state in the hugepage-backed config */
700 	struct rte_config *cfg = rte_eal_get_configuration();
701 	cfg->lcore_role[lcore] = state;
702 
703 	/* mark state in process local lcore_config */
704 	lcore_config[lcore].core_role = state;
705 
706 	/* update per-lcore optimized state tracking */
707 	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
708 
709 	rte_eal_trace_service_lcore_state_change(lcore, state);
710 }
711 
712 int32_t
713 rte_service_lcore_reset_all(void)
714 {
715 	/* loop over cores, reset all to mask 0 */
716 	uint32_t i;
717 	for (i = 0; i < RTE_MAX_LCORE; i++) {
718 		if (lcore_states[i].is_service_core) {
719 			lcore_states[i].service_mask = 0;
720 			set_lcore_state(i, ROLE_RTE);
721 			/* runstate acts as the guard variable. Use
722 			 * store-release memory order here to synchronize
723 			 * with load-acquire in runstate read functions.
724 			 */
725 			rte_atomic_store_explicit(&lcore_states[i].runstate,
726 				RUNSTATE_STOPPED, rte_memory_order_release);
727 		}
728 	}
729 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
730 		rte_atomic_store_explicit(&rte_services[i].num_mapped_cores, 0,
731 			rte_memory_order_relaxed);
732 
733 	return 0;
734 }
735 
736 int32_t
737 rte_service_lcore_add(uint32_t lcore)
738 {
739 	if (lcore >= RTE_MAX_LCORE)
740 		return -EINVAL;
741 	if (lcore_states[lcore].is_service_core)
742 		return -EALREADY;
743 
744 	set_lcore_state(lcore, ROLE_SERVICE);
745 
746 	/* ensure that after adding a core the mask and state are defaults */
747 	lcore_states[lcore].service_mask = 0;
748 	/* Use store-release memory order here to synchronize with
749 	 * load-acquire in runstate read functions.
750 	 */
751 	rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
752 		rte_memory_order_release);
753 
754 	return rte_eal_wait_lcore(lcore);
755 }
756 
757 int32_t
758 rte_service_lcore_del(uint32_t lcore)
759 {
760 	if (lcore >= RTE_MAX_LCORE)
761 		return -EINVAL;
762 
763 	struct core_state *cs = &lcore_states[lcore];
764 	if (!cs->is_service_core)
765 		return -EINVAL;
766 
767 	/* runstate acts as the guard variable. Use load-acquire
768 	 * memory order here to synchronize with store-release
769 	 * in runstate update functions.
770 	 */
771 	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) !=
772 			RUNSTATE_STOPPED)
773 		return -EBUSY;
774 
775 	set_lcore_state(lcore, ROLE_RTE);
776 
777 	rte_smp_wmb();
778 	return 0;
779 }
780 
781 int32_t
782 rte_service_lcore_start(uint32_t lcore)
783 {
784 	if (lcore >= RTE_MAX_LCORE)
785 		return -EINVAL;
786 
787 	struct core_state *cs = &lcore_states[lcore];
788 	if (!cs->is_service_core)
789 		return -EINVAL;
790 
791 	/* runstate acts as the guard variable. Use load-acquire
792 	 * memory order here to synchronize with store-release
793 	 * in runstate update functions.
794 	 */
795 	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
796 			RUNSTATE_RUNNING)
797 		return -EALREADY;
798 
799 	/* Set the core to the run state first and then launch; otherwise
800 	 * it would return immediately, as runstate keeps it in the poll loop.
801 	 */
802 	/* Use store-release memory order here to synchronize with
803 	 * load-acquire in runstate read functions.
804 	 */
805 	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_RUNNING, rte_memory_order_release);
806 
807 	rte_eal_trace_service_lcore_start(lcore);
808 
809 	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
810 	/* returns -EBUSY if the core is already launched, 0 on success */
811 	return ret;
812 }
813 
814 int32_t
815 rte_service_lcore_stop(uint32_t lcore)
816 {
817 	if (lcore >= RTE_MAX_LCORE)
818 		return -EINVAL;
819 
820 	/* runstate acts as the guard variable. Use load-acquire
821 	 * memory order here to synchronize with store-release
822 	 * in runstate update functions.
823 	 */
824 	if (rte_atomic_load_explicit(&lcore_states[lcore].runstate, rte_memory_order_acquire) ==
825 			RUNSTATE_STOPPED)
826 		return -EALREADY;
827 
828 	uint32_t i;
829 	struct core_state *cs = &lcore_states[lcore];
830 	uint64_t service_mask = cs->service_mask;
831 
832 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
833 		int32_t enabled = service_mask & (UINT64_C(1) << i);
834 		int32_t service_running = rte_service_runstate_get(i);
835 		int32_t only_core = (1 ==
836 			rte_atomic_load_explicit(&rte_services[i].num_mapped_cores,
837 				rte_memory_order_relaxed));
838 
839 		/* if the core is mapped, and the service is running, and this
840 		 * is the only core that is mapped, the service would cease to
841 		 * run if this core stopped, so fail instead.
842 		 */
843 		if (enabled && service_running && only_core)
844 			return -EBUSY;
845 	}
846 
847 	/* Use store-release memory order here to synchronize with
848 	 * load-acquire in runstate read functions.
849 	 */
850 	rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
851 		rte_memory_order_release);
852 
853 	rte_eal_trace_service_lcore_stop(lcore);
854 
855 	return 0;
856 }
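
/*
 * Teardown sketch (illustrative, hypothetical id): returning a service
 * core to the application. The stop fails with -EBUSY above while this is
 * the only core mapped to a running service.
 *
 *	rte_service_lcore_stop(lcore_id);
 *	while (rte_service_lcore_may_be_active(lcore_id) == 1)
 *		rte_pause();
 *	rte_service_lcore_del(lcore_id);
 */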
857 
858 static uint64_t
859 lcore_attr_get_loops(unsigned int lcore)
860 {
861 	struct core_state *cs = &lcore_states[lcore];
862 
863 	return rte_atomic_load_explicit(&cs->loops, rte_memory_order_relaxed);
864 }
865 
866 static uint64_t
867 lcore_attr_get_cycles(unsigned int lcore)
868 {
869 	struct core_state *cs = &lcore_states[lcore];
870 
871 	return rte_atomic_load_explicit(&cs->cycles, rte_memory_order_relaxed);
872 }
873 
874 static uint64_t
875 lcore_attr_get_service_calls(uint32_t service_id, unsigned int lcore)
876 {
877 	struct core_state *cs = &lcore_states[lcore];
878 
879 	return rte_atomic_load_explicit(&cs->service_stats[service_id].calls,
880 		rte_memory_order_relaxed);
881 }
882 
883 static uint64_t
884 lcore_attr_get_service_idle_calls(uint32_t service_id, unsigned int lcore)
885 {
886 	struct core_state *cs = &lcore_states[lcore];
887 
888 	return rte_atomic_load_explicit(&cs->service_stats[service_id].idle_calls,
889 		rte_memory_order_relaxed);
890 }
891 
892 static uint64_t
893 lcore_attr_get_service_error_calls(uint32_t service_id, unsigned int lcore)
894 {
895 	struct core_state *cs = &lcore_states[lcore];
896 
897 	return rte_atomic_load_explicit(&cs->service_stats[service_id].error_calls,
898 		rte_memory_order_relaxed);
899 }
900 
901 static uint64_t
902 lcore_attr_get_service_cycles(uint32_t service_id, unsigned int lcore)
903 {
904 	struct core_state *cs = &lcore_states[lcore];
905 
906 	return rte_atomic_load_explicit(&cs->service_stats[service_id].cycles,
907 		rte_memory_order_relaxed);
908 }
909 
910 typedef uint64_t (*lcore_attr_get_fun)(uint32_t service_id,
911 				       unsigned int lcore);
912 
913 static uint64_t
914 attr_get(uint32_t id, lcore_attr_get_fun lcore_attr_get)
915 {
916 	unsigned int lcore;
917 	uint64_t sum = 0;
918 
919 	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
920 		if (lcore_states[lcore].is_service_core)
921 			sum += lcore_attr_get(id, lcore);
922 	}
923 
924 	return sum;
925 }
926 
927 static uint64_t
928 attr_get_service_calls(uint32_t service_id)
929 {
930 	return attr_get(service_id, lcore_attr_get_service_calls);
931 }
932 
933 static uint64_t
934 attr_get_service_idle_calls(uint32_t service_id)
935 {
936 	return attr_get(service_id, lcore_attr_get_service_idle_calls);
937 }
938 
939 static uint64_t
940 attr_get_service_error_calls(uint32_t service_id)
941 {
942 	return attr_get(service_id, lcore_attr_get_service_error_calls);
943 }
944 
945 static uint64_t
946 attr_get_service_cycles(uint32_t service_id)
947 {
948 	return attr_get(service_id, lcore_attr_get_service_cycles);
949 }
950 
951 int32_t
952 rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
953 {
954 	if (!service_valid(id))
955 		return -EINVAL;
956 
957 	if (!attr_value)
958 		return -EINVAL;
959 
960 	switch (attr_id) {
961 	case RTE_SERVICE_ATTR_CALL_COUNT:
962 		*attr_value = attr_get_service_calls(id);
963 		return 0;
964 	case RTE_SERVICE_ATTR_IDLE_CALL_COUNT:
965 		*attr_value = attr_get_service_idle_calls(id);
966 		return 0;
967 	case RTE_SERVICE_ATTR_ERROR_CALL_COUNT:
968 		*attr_value = attr_get_service_error_calls(id);
969 		return 0;
970 	case RTE_SERVICE_ATTR_CYCLES:
971 		*attr_value = attr_get_service_cycles(id);
972 		return 0;
973 	default:
974 		return -EINVAL;
975 	}
976 }
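
/*
 * Statistics sketch (illustrative, hypothetical id): reading the
 * aggregate call and cycle counts for a service. Statistics must first be
 * enabled with rte_service_set_stats_enable().
 *
 *	uint64_t calls, cycles;
 *
 *	rte_service_set_stats_enable(id, 1);
 *	...
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES, &cycles);
 */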
977 
978 int32_t
979 rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
980 			   uint64_t *attr_value)
981 {
982 	struct core_state *cs;
983 
984 	if (lcore >= RTE_MAX_LCORE || !attr_value)
985 		return -EINVAL;
986 
987 	cs = &lcore_states[lcore];
988 	if (!cs->is_service_core)
989 		return -ENOTSUP;
990 
991 	switch (attr_id) {
992 	case RTE_SERVICE_LCORE_ATTR_LOOPS:
993 		*attr_value = lcore_attr_get_loops(lcore);
994 		return 0;
995 	case RTE_SERVICE_LCORE_ATTR_CYCLES:
996 		*attr_value = lcore_attr_get_cycles(lcore);
997 		return 0;
998 	default:
999 		return -EINVAL;
1000 	}
1001 }
1002 
1003 int32_t
1004 rte_service_attr_reset_all(uint32_t id)
1005 {
1006 	unsigned int lcore;
1007 
1008 	if (!service_valid(id))
1009 		return -EINVAL;
1010 
1011 	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
1012 		struct core_state *cs = &lcore_states[lcore];
1013 
1014 		cs->service_stats[id] = (struct service_stats) {};
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 int32_t
1021 rte_service_lcore_attr_reset_all(uint32_t lcore)
1022 {
1023 	struct core_state *cs;
1024 
1025 	if (lcore >= RTE_MAX_LCORE)
1026 		return -EINVAL;
1027 
1028 	cs = &lcore_states[lcore];
1029 	if (!cs->is_service_core)
1030 		return -ENOTSUP;
1031 
1032 	cs->loops = 0;
1033 
1034 	return 0;
1035 }
1036 
1037 static void
1038 service_dump_one(FILE *f, uint32_t id)
1039 {
1040 	struct rte_service_spec_impl *s;
1041 	uint64_t service_calls;
1042 	uint64_t service_cycles;
1043 
1044 	service_calls = attr_get_service_calls(id);
1045 	service_cycles = attr_get_service_cycles(id);
1046 
1047 	/* avoid divide by zero */
1048 	if (service_calls == 0)
1049 		service_calls = 1;
1050 
1051 	s = service_get(id);
1052 
1053 	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
1054 		PRIu64"\tavg: %"PRIu64"\n",
1055 		s->spec.name, service_stats_enabled(s), service_calls,
1056 		service_cycles, service_cycles / service_calls);
1057 }
1058 
1059 static void
1060 service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
1061 {
1062 	uint32_t i;
1063 	struct core_state *cs = &lcore_states[lcore];
1064 
1065 	fprintf(f, "%02d\t", lcore);
1066 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
1067 		if (!service_registered(i))
1068 			continue;
1069 		fprintf(f, "%"PRIu64"\t", cs->service_stats[i].calls);
1070 	}
1071 	fprintf(f, "\n");
1072 }
1073 
1074 int32_t
1075 rte_service_dump(FILE *f, uint32_t id)
1076 {
1077 	uint32_t i;
1078 	int print_one = (id != UINT32_MAX);
1079 
1080 	/* print only the specified service */
1081 	if (print_one) {
1082 		struct rte_service_spec_impl *s;
1083 		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
1084 		fprintf(f, "Service %s Summary\n", s->spec.name);
1085 		service_dump_one(f, id);
1086 		return 0;
1087 	}
1088 
1089 	/* print all services, as UINT32_MAX was passed as id */
1090 	fprintf(f, "Services Summary\n");
1091 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
1092 		if (!service_registered(i))
1093 			continue;
1094 		service_dump_one(f, i);
1095 	}
1096 
1097 	fprintf(f, "Service Cores Summary\n");
1098 	for (i = 0; i < RTE_MAX_LCORE; i++) {
1099 		if (lcore_config[i].core_role != ROLE_SERVICE)
1100 			continue;
1101 
1102 		service_dump_calls_per_lcore(f, i);
1103 	}
1104 
1105 	return 0;
1106 }
1107