/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include <rte_service.h>
#include <rte_service_component.h>

#include <eal_trace_internal.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_trace_point.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* Spin lock which, when held, indicates that a service core is
	 * currently running this service callback. When not held, a core
	 * may take the lock and then run the service callback.
	 */
	rte_spinlock_t execute_lock;

	/* API set/get-able variables */
	RTE_ATOMIC(int8_t) app_runstate;
	RTE_ATOMIC(int8_t) comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	/* Indicates how many cores the service is mapped to run on.
	 * It does not indicate the number of cores the service is running
	 * on currently.
	 */
	RTE_ATOMIC(uint32_t) num_mapped_cores;
} __rte_cache_aligned;

struct service_stats {
	RTE_ATOMIC(uint64_t) calls;
	RTE_ATOMIC(uint64_t) cycles;
};

/* the internal values of a service core */
struct core_state {
	/* bitmap of the service IDs mapped to run on this core */
	uint64_t service_mask;
	RTE_ATOMIC(uint8_t) runstate; /* running or stopped */
	RTE_ATOMIC(uint8_t) thread_active; /* indicates when thread is in service_run() */
	uint8_t is_service_core; /* set if core is currently a service core */
	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
	RTE_ATOMIC(uint64_t) loops;
	RTE_ATOMIC(uint64_t) cycles;
	struct service_stats service_stats[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t
rte_service_init(void)
{
	/* Hard limit due to the use of a uint64_t-based bitmask (and the
	 * 64-bit count-leading/trailing-zeros intrinsics).
	 */
	RTE_BUILD_BUG_ON(RTE_SERVICE_NUM_MAX > 64);

	if (rte_service_library_initialized) {
		EAL_LOG(NOTICE,
			"service library init() called, init flag %d",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		EAL_LOG(ERR, "error allocating rte services array");
		goto fail_mem;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		EAL_LOG(ERR, "error allocating core states array");
		goto fail_mem;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->main_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
fail_mem:
	rte_free(rte_services);
	rte_free(lcore_states);
	return -ENOMEM;
}

void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	rte_service_lcore_reset_all();
	rte_eal_mp_wait_lcore();

	rte_free(rte_services);
	rte_free(lcore_states);

	rte_service_library_initialized = 0;
}

static inline bool
service_registered(uint32_t id)
{
	return rte_services[id].internal_flags & SERVICE_F_REGISTERED;
}

static inline bool
service_valid(uint32_t id)
{
	return id < RTE_SERVICE_NUM_MAX && service_registered(id);
}

static struct rte_service_spec_impl *
service_get(uint32_t id)
{
	return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (!service_valid(id))                                         \
		return retval;                                          \
	service = &rte_services[id];                                    \
} while (0)
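
/* Illustrative sketch of how the macro above is used throughout this file
 * (no additional functionality):
 *
 *	struct rte_service_spec_impl *s;
 *	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 *	// from here on, 's' points at a validated, registered service
 */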

/* Returns 1 if statistics should be collected for the service,
 * 0 otherwise.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_registered(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
			       uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_registered(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	rte_eal_trace_service_component_register(free_slot, spec->name);

	return 0;
}
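
/* A minimal sketch of how a component registers a service with the function
 * above (illustrative only; "my_driver_poll" and "my_driver_ctx" are
 * hypothetical, see rte_service_component.h for the rte_service_spec
 * definition):
 *
 *	static int32_t my_driver_poll(void *userdata);	// hypothetical callback
 *
 *	struct rte_service_spec spec = {
 *		.name = "my_driver_service",
 *		.callback = my_driver_poll,
 *		.callback_userdata = my_driver_ctx,
 *		.capabilities = 0,	// not RTE_SERVICE_CAP_MT_SAFE
 *	};
 *	uint32_t id;
 *	if (rte_service_component_register(&spec, &id) == 0)
 *		rte_service_component_runstate_set(id, 1);
 */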

int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with the load-acquire in the
	 * service_run and rte_service_runstate_get functions.
	 */
	if (runstate)
		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_RUNNING,
			rte_memory_order_release);
	else
		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_STOPPED,
			rte_memory_order_release);

	return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* app_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with the load-acquire in the
	 * service_run and rte_service_runstate_get functions.
	 */
	if (runstate)
		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_RUNNING,
			rte_memory_order_release);
	else
		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_STOPPED,
			rte_memory_order_release);

	rte_eal_trace_service_runstate_set(id, runstate);
	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING &&
	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING) {
		int check_disabled = !(s->internal_flags &
			SERVICE_F_START_CHECK);
		int lcore_mapped = (rte_atomic_load_explicit(&s->num_mapped_cores,
			rte_memory_order_relaxed) > 0);

		return (check_disabled | lcore_mapped);
	} else
		return 0;

}

static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
			   struct core_state *cs, uint32_t service_idx)
{
	rte_eal_trace_service_run_begin(service_idx, rte_lcore_id());
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		int rc = s->spec.callback(userdata);

		/* The lcore service worker thread is the only writer,
		 * and thus only a non-atomic load and an atomic store
		 * are needed, and not the more expensive atomic
		 * add.
		 */
		struct service_stats *service_stats =
			&cs->service_stats[service_idx];

		if (likely(rc != -EAGAIN)) {
			uint64_t end = rte_rdtsc();
			uint64_t cycles = end - start;

			rte_atomic_store_explicit(&cs->cycles, cs->cycles + cycles,
				rte_memory_order_relaxed);
			rte_atomic_store_explicit(&service_stats->cycles,
				service_stats->cycles + cycles,
				rte_memory_order_relaxed);
		}

		rte_atomic_store_explicit(&service_stats->calls,
			service_stats->calls + 1, rte_memory_order_relaxed);
	} else {
		s->spec.callback(userdata);
	}
	rte_eal_trace_service_run_end(service_idx, rte_lcore_id());
}


/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
	    struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) !=
			RUNSTATE_RUNNING ||
	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) !=
			RUNSTATE_RUNNING ||
	    !(service_mask & (UINT64_C(1) << i))) {
		cs->service_active_on_lcore[i] = 0;
		return -ENOEXEC;
	}

	cs->service_active_on_lcore[i] = 1;

	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
		if (!rte_spinlock_trylock(&s->execute_lock))
			return -EBUSY;

		service_runner_do_callback(s, cs, i);
		rte_spinlock_unlock(&s->execute_lock);
	} else
		service_runner_do_callback(s, cs, i);

	return 0;
}

int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (!service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		if (lcore_states[ids[i]].service_active_on_lcore[id])
			return 1;
	}

	return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s;

	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* Increment num_mapped_cores to reflect that this core is
	 * now mapped and capable of running the service.
	 */
	rte_atomic_fetch_add_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);

	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

	rte_atomic_fetch_sub_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);

	return ret;
}
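
/* A minimal sketch of an application lcore driving a service itself instead
 * of (or in addition to) dedicated service cores; 'id' is assumed to have
 * been obtained via rte_service_get_by_name(), and 'keep_running' is a
 * hypothetical loop condition:
 *
 *	while (keep_running) {
 *		// serialize_mt_unsafe = 1: take execute_lock for MT-unsafe services
 *		rte_service_run_iter_on_app_lcore(id, 1);
 *		// ... other per-iteration application work ...
 *	}
 */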

static int32_t
service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint8_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	rte_atomic_store_explicit(&cs->thread_active, 1, rte_memory_order_seq_cst);

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	while (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING) {

		const uint64_t service_mask = cs->service_mask;
		uint8_t start_id;
		uint8_t end_id;

		if (service_mask == 0)
			continue;

		start_id = rte_ctz64(service_mask);
		end_id = 64 - rte_clz64(service_mask);

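		/* Worked example (illustrative): service_mask = 0x14 maps
		 * services 2 and 4, giving start_id = 2 and end_id = 5; the
		 * loop below then visits IDs 2..4, and service_run() skips
		 * the unmapped ID 3 via its service_mask check.
		 */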
		for (i = start_id; i < end_id; i++) {
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask, service_get(i), 1);
		}

		rte_atomic_store_explicit(&cs->loops, cs->loops + 1, rte_memory_order_relaxed);
	}

	/* Switch off this core for all services, to ensure that future
	 * calls to may_be_active() know this core is switched off.
	 */
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		cs->service_active_on_lcore[i] = 0;

	/* Use SEQ CST memory ordering to avoid any re-ordering around
	 * this store, ensuring that once this store is visible, the service
	 * lcore thread really is done in service cores code.
	 */
	rte_atomic_store_explicit(&cs->thread_active, 0, rte_memory_order_seq_cst);
	return 0;
}

int32_t
rte_service_lcore_may_be_active(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	/* Load thread_active using ACQUIRE to avoid instructions dependent on
	 * the result being re-ordered before this load completes.
	 */
	return rte_atomic_load_explicit(&lcore_states[lcore].thread_active,
			       rte_memory_order_acquire);
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return rte_popcount64(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* Do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Multiple services
		 * multiplex onto a single core, or map 1:1 if there are the
		 * same number of services as service cores.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
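
/* A minimal sketch of the "defaults" start-up path from an application's
 * point of view (illustrative; 'spare_lcore_id' is hypothetical, and at
 * least one service core must exist before the call succeeds):
 *
 *	if (rte_service_lcore_count() == 0)
 *		rte_service_lcore_add(spare_lcore_id);
 *	rte_service_start_with_defaults();
 */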

static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
	/* validate ID, or return error value */
	if (!service_valid(sid) || lcore >= RTE_MAX_LCORE ||
			!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_atomic_fetch_add_explicit(&rte_services[sid].num_mapped_cores,
				1, rte_memory_order_relaxed);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_atomic_fetch_sub_explicit(&rte_services[sid].num_mapped_cores,
				1, rte_memory_order_relaxed);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	uint32_t on = enabled > 0;
	rte_eal_trace_service_map_lcore(id, lcore, enabled);
	return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	uint32_t enabled;
	int ret = service_update(id, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);

	rte_eal_trace_service_lcore_state_change(lcore, state);
}

int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			/* runstate acts as the guard variable. Use
			 * store-release memory order here to synchronize
			 * with load-acquire in runstate read functions.
			 */
			rte_atomic_store_explicit(&lcore_states[i].runstate,
				RUNSTATE_STOPPED, rte_memory_order_release);
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_atomic_store_explicit(&rte_services[i].num_mapped_cores, 0,
			rte_memory_order_relaxed);

	return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		rte_memory_order_release);

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) !=
			RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	rte_smp_wmb();
	return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
			RUNSTATE_RUNNING)
		return -EALREADY;

	/* Set the core to the running state first, and then launch;
	 * otherwise the launched thread would return immediately, as the
	 * runstate keeps it in the service poll loop.
	 *
	 * Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_RUNNING, rte_memory_order_release);

	rte_eal_trace_service_lcore_start(lcore);

	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
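
/* A minimal sketch of the manual (non-default) mapping path, combining the
 * lcore and mapping APIs in this file (illustrative; 'id' and 'lcore' are
 * assumed to be a registered service ID and an otherwise idle lcore):
 *
 *	rte_service_lcore_add(lcore);
 *	rte_service_map_lcore_set(id, lcore, 1);
 *	rte_service_runstate_set(id, 1);
 *	rte_service_lcore_start(lcore);
 */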

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (rte_atomic_load_explicit(&lcore_states[lcore].runstate, rte_memory_order_acquire) ==
			RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];
	uint64_t service_mask = cs->service_mask;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = service_mask & (UINT64_C(1) << i);
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			rte_atomic_load_explicit(&rte_services[i].num_mapped_cores,
				rte_memory_order_relaxed));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		rte_memory_order_release);

	rte_eal_trace_service_lcore_stop(lcore);

	return 0;
}

static uint64_t
lcore_attr_get_loops(unsigned int lcore)
{
	struct core_state *cs = &lcore_states[lcore];

	return rte_atomic_load_explicit(&cs->loops, rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_cycles(unsigned int lcore)
{
	struct core_state *cs = &lcore_states[lcore];

	return rte_atomic_load_explicit(&cs->cycles, rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_service_calls(uint32_t service_id, unsigned int lcore)
{
	struct core_state *cs = &lcore_states[lcore];

	return rte_atomic_load_explicit(&cs->service_stats[service_id].calls,
		rte_memory_order_relaxed);
}

static uint64_t
lcore_attr_get_service_cycles(uint32_t service_id, unsigned int lcore)
{
	struct core_state *cs = &lcore_states[lcore];

	return rte_atomic_load_explicit(&cs->service_stats[service_id].cycles,
		rte_memory_order_relaxed);
}

typedef uint64_t (*lcore_attr_get_fun)(uint32_t service_id,
				       unsigned int lcore);

static uint64_t
attr_get(uint32_t id, lcore_attr_get_fun lcore_attr_get)
{
	unsigned int lcore;
	uint64_t sum = 0;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		if (lcore_states[lcore].is_service_core)
			sum += lcore_attr_get(id, lcore);
	}

	return sum;
}

static uint64_t
attr_get_service_calls(uint32_t service_id)
{
	return attr_get(service_id, lcore_attr_get_service_calls);
}

static uint64_t
attr_get_service_cycles(uint32_t service_id)
{
	return attr_get(service_id, lcore_attr_get_service_cycles);
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	if (!service_valid(id))
		return -EINVAL;

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = attr_get_service_calls(id);
		return 0;
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = attr_get_service_cycles(id);
		return 0;
	default:
		return -EINVAL;
	}
}
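
/* A minimal sketch of reading the per-service counters exposed above
 * (illustrative; note that call/cycle statistics are only accumulated once
 * rte_service_set_stats_enable(id, 1) has been called for the service):
 *
 *	uint64_t calls, cycles;
 *	if (rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls) == 0 &&
 *			rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES, &cycles) == 0)
 *		printf("calls=%" PRIu64 " cycles=%" PRIu64 "\n", calls, cycles);
 */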

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
			   uint64_t *attr_value)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = lcore_attr_get_loops(lcore);
		return 0;
	case RTE_SERVICE_LCORE_ATTR_CYCLES:
		*attr_value = lcore_attr_get_cycles(lcore);
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
	unsigned int lcore;

	if (!service_valid(id))
		return -EINVAL;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		struct core_state *cs = &lcore_states[lcore];

		cs->service_stats[id] = (struct service_stats) {};
	}

	return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}

static void
service_dump_one(FILE *f, uint32_t id)
{
	struct rte_service_spec_impl *s;
	uint64_t service_calls;
	uint64_t service_cycles;

	service_calls = attr_get_service_calls(id);
	service_cycles = attr_get_service_cycles(id);

	/* avoid divide by zero */
	if (service_calls == 0)
		service_calls = 1;

	s = service_get(id);

	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
		PRIu64"\tavg: %"PRIu64"\n",
		s->spec.name, service_stats_enabled(s), service_calls,
		service_cycles, service_cycles / service_calls);
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_registered(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->service_stats[i].calls);
	}
	fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		service_dump_one(f, id);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_registered(i))
			continue;
		service_dump_one(f, i);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		service_dump_calls_per_lcore(f, i);
	}

	return 0;
}
1058