xref: /dpdk/lib/eal/common/rte_service.c (revision b24bbaedbba2df6ad2c25bc0bbde52fb55876fdb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <inttypes.h>
7 #include <string.h>
8 
9 #include <rte_service.h>
10 #include <rte_service_component.h>
11 
12 #include <eal_trace_internal.h>
13 #include <rte_lcore.h>
14 #include <rte_lcore_var.h>
15 #include <rte_bitset.h>
16 #include <rte_branch_prediction.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_atomic.h>
20 #include <rte_malloc.h>
21 #include <rte_spinlock.h>
22 #include <rte_trace_point.h>
23 
24 #include "eal_private.h"
25 
26 #define RTE_SERVICE_NUM_MAX 64
27 
28 #define SERVICE_F_REGISTERED    (1 << 0)
29 #define SERVICE_F_STATS_ENABLED (1 << 1)
30 #define SERVICE_F_START_CHECK   (1 << 2)
31 
32 /* runstates for services and lcores, denoting if they are active or not */
33 #define RUNSTATE_STOPPED 0
34 #define RUNSTATE_RUNNING 1
35 
36 /* internal representation of a service */
37 struct __rte_cache_aligned rte_service_spec_impl {
38 	/* public part of the struct */
39 	struct rte_service_spec spec;
40 
41 	/* spin lock that when set indicates a service core is currently
42 	 * running this service callback. When not set, a core may take the
43 	 * lock and then run the service callback.
44 	 */
45 	rte_spinlock_t execute_lock;
46 
47 	/* API set/get-able variables */
48 	RTE_ATOMIC(int8_t) app_runstate;
49 	RTE_ATOMIC(int8_t) comp_runstate;
50 	uint8_t internal_flags;
51 
52 	/* per service statistics */
53 	/* Indicates how many cores the service is mapped to run on.
54 	 * It does not indicate the number of cores the service is running
55 	 * on currently.
56 	 */
57 	RTE_ATOMIC(uint32_t) num_mapped_cores;
58 };
59 
60 struct service_stats {
61 	RTE_ATOMIC(uint64_t) calls;
62 	RTE_ATOMIC(uint64_t) idle_calls;
63 	RTE_ATOMIC(uint64_t) error_calls;
64 	RTE_ATOMIC(uint64_t) cycles;
65 };
66 
67 /* the internal values of a service core */
68 struct __rte_cache_aligned core_state {
69 	/* bitmap of service IDs mapped to run on this core */
70 	RTE_BITSET_DECLARE(mapped_services, RTE_SERVICE_NUM_MAX);
71 	RTE_ATOMIC(uint8_t) runstate; /* running or stopped */
72 	RTE_ATOMIC(uint8_t) thread_active; /* indicates when thread is in service_run() */
73 	uint8_t is_service_core; /* set if core is currently a service core */
74 	RTE_BITSET_DECLARE(service_active_on_lcore, RTE_SERVICE_NUM_MAX);
75 	RTE_ATOMIC(uint64_t) loops;
76 	RTE_ATOMIC(uint64_t) cycles;
77 	struct service_stats service_stats[RTE_SERVICE_NUM_MAX];
78 };
79 
80 static uint32_t rte_service_count;
81 static struct rte_service_spec_impl *rte_services;
82 static RTE_LCORE_VAR_HANDLE(struct core_state, lcore_states);
83 static uint32_t rte_service_library_initialized;
84 
85 int32_t
86 rte_service_init(void)
87 {
88 	if (rte_service_library_initialized) {
89 		EAL_LOG(NOTICE,
90 			"service library init() called, init flag %d",
91 			rte_service_library_initialized);
92 		return -EALREADY;
93 	}
94 
95 	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
96 			sizeof(struct rte_service_spec_impl),
97 			RTE_CACHE_LINE_SIZE);
98 	if (!rte_services) {
99 		EAL_LOG(ERR, "error allocating rte services array");
100 		goto fail_mem;
101 	}
102 
103 	if (lcore_states == NULL)
104 		RTE_LCORE_VAR_ALLOC(lcore_states);
105 
106 	int i;
107 	struct rte_config *cfg = rte_eal_get_configuration();
108 	for (i = 0; i < RTE_MAX_LCORE; i++) {
109 		if (lcore_config[i].core_role == ROLE_SERVICE) {
110 			if ((unsigned int)i == cfg->main_lcore)
111 				continue;
112 			rte_service_lcore_add(i);
113 		}
114 	}
115 
116 	rte_service_library_initialized = 1;
117 	return 0;
118 fail_mem:
119 	rte_free(rte_services);
120 	return -ENOMEM;
121 }
122 
123 void
124 rte_service_finalize(void)
125 {
126 	if (!rte_service_library_initialized)
127 		return;
128 
129 	rte_service_lcore_reset_all();
130 	rte_eal_mp_wait_lcore();
131 
132 	rte_free(rte_services);
133 
134 	rte_service_library_initialized = 0;
135 }
136 
137 static inline bool
138 service_registered(uint32_t id)
139 {
140 	return rte_services[id].internal_flags & SERVICE_F_REGISTERED;
141 }
142 
143 static inline bool
144 service_valid(uint32_t id)
145 {
146 	return id < RTE_SERVICE_NUM_MAX && service_registered(id);
147 }
148 
149 static struct rte_service_spec_impl *
150 service_get(uint32_t id)
151 {
152 	return &rte_services[id];
153 }
154 
155 /* validate ID and retrieve service pointer, or return error value */
156 #define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
157 	if (!service_valid(id))                                         \
158 		return retval;                                          \
159 	service = &rte_services[id];                                    \
160 } while (0)
161 
162 /* Returns 1 if statistics should be collected for the service,
163  * or 0 if statistics should not be collected.
164  */
165 static inline int
166 service_stats_enabled(struct rte_service_spec_impl *impl)
167 {
168 	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
169 }
170 
171 static inline int
172 service_mt_safe(struct rte_service_spec_impl *s)
173 {
174 	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
175 }
176 
177 int32_t
178 rte_service_set_stats_enable(uint32_t id, int32_t enabled)
179 {
180 	struct rte_service_spec_impl *s;
181 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
182 
183 	if (enabled)
184 		s->internal_flags |= SERVICE_F_STATS_ENABLED;
185 	else
186 		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
187 
188 	return 0;
189 }
190 
191 int32_t
192 rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
193 {
194 	struct rte_service_spec_impl *s;
195 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
196 
197 	if (enabled)
198 		s->internal_flags |= SERVICE_F_START_CHECK;
199 	else
200 		s->internal_flags &= ~(SERVICE_F_START_CHECK);
201 
202 	return 0;
203 }
204 
205 uint32_t
206 rte_service_get_count(void)
207 {
208 	return rte_service_count;
209 }
210 
211 int32_t
212 rte_service_get_by_name(const char *name, uint32_t *service_id)
213 {
214 	if (!service_id)
215 		return -EINVAL;
216 
217 	int i;
218 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
219 		if (service_registered(i) &&
220 				strcmp(name, rte_services[i].spec.name) == 0) {
221 			*service_id = i;
222 			return 0;
223 		}
224 	}
225 
226 	return -ENODEV;
227 }
228 
229 const char *
230 rte_service_get_name(uint32_t id)
231 {
232 	struct rte_service_spec_impl *s;
233 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
234 	return s->spec.name;
235 }
236 
237 int32_t
238 rte_service_probe_capability(uint32_t id, uint32_t capability)
239 {
240 	struct rte_service_spec_impl *s;
241 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
242 	return !!(s->spec.capabilities & capability);
243 }
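
/*
 * Illustrative sketch (editor's addition, not part of this file): a caller can
 * use rte_service_probe_capability() to check whether a service is
 * multi-thread safe, and hence whether serialization is needed when the
 * application runs it from more than one lcore. 'svc_id' is assumed to come
 * from rte_service_get_by_name() or from component registration.
 *
 *	int mt_safe = rte_service_probe_capability(svc_id,
 *			RTE_SERVICE_CAP_MT_SAFE);
 *	if (mt_safe == 1) {
 *		// callback may be invoked concurrently; no serialization needed
 *	} else if (mt_safe == 0) {
 *		// pass serialize_mt_unsafe = 1 to
 *		// rte_service_run_iter_on_app_lcore(), or map the service to
 *		// a single lcore only
 *	}
 */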
244 
245 int32_t
246 rte_service_component_register(const struct rte_service_spec *spec,
247 			       uint32_t *id_ptr)
248 {
249 	uint32_t i;
250 	int32_t free_slot = -1;
251 
252 	if (spec->callback == NULL || strlen(spec->name) == 0)
253 		return -EINVAL;
254 
255 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
256 		if (!service_registered(i)) {
257 			free_slot = i;
258 			break;
259 		}
260 	}
261 
262 	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
263 		return -ENOSPC;
264 
265 	struct rte_service_spec_impl *s = &rte_services[free_slot];
266 	s->spec = *spec;
267 	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
268 
269 	rte_service_count++;
270 
271 	if (id_ptr)
272 		*id_ptr = free_slot;
273 
274 	rte_eal_trace_service_component_register(free_slot, spec->name);
275 
276 	return 0;
277 }
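
/*
 * Illustrative sketch (editor's addition, not part of this file): a component
 * (e.g. a software PMD or adapter) might register its poll function as a
 * service roughly as follows. The names prefixed "example_" are hypothetical.
 *
 *	static int32_t
 *	example_poll(void *userdata)
 *	{
 *		struct example_ctx *ctx = userdata;	// hypothetical context
 *
 *		return example_do_work(ctx) ? 0 : -EAGAIN;	// -EAGAIN == idle call
 *	}
 *
 *	static int
 *	example_register(struct example_ctx *ctx, uint32_t *id)
 *	{
 *		struct rte_service_spec spec = {
 *			.callback = example_poll,
 *			.callback_userdata = ctx,
 *		};
 *		strlcpy(spec.name, "example_svc", sizeof(spec.name));
 *
 *		int ret = rte_service_component_register(&spec, id);
 *		if (ret)
 *			return ret;
 *		// mark the component side as ready to run
 *		return rte_service_component_runstate_set(*id, 1);
 *	}
 */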
278 
279 int32_t
280 rte_service_component_unregister(uint32_t id)
281 {
282 	struct rte_service_spec_impl *s;
283 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
284 
285 	rte_service_count--;
286 
287 	s->internal_flags &= ~(SERVICE_F_REGISTERED);
288 
289 	unsigned int lcore_id;
290 	struct core_state *cs;
291 	/* clear this service's mapped bit on all cores */
292 	RTE_LCORE_VAR_FOREACH(lcore_id, cs, lcore_states)
293 		rte_bitset_clear(cs->mapped_services, id);
294 
295 	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));
296 
297 	return 0;
298 }
299 
300 int32_t
301 rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
302 {
303 	struct rte_service_spec_impl *s;
304 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
305 
306 	/* comp_runstate acts as the guard variable. Use store-release
307 	 * memory order. This synchronizes with the load-acquire in the
308 	 * service_run and rte_service_runstate_get functions.
309 	 */
310 	if (runstate)
311 		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_RUNNING,
312 			rte_memory_order_release);
313 	else
314 		rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_STOPPED,
315 			rte_memory_order_release);
316 
317 	return 0;
318 }
319 
320 int32_t
321 rte_service_runstate_set(uint32_t id, uint32_t runstate)
322 {
323 	struct rte_service_spec_impl *s;
324 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
325 
326 	/* app_runstate acts as the guard variable. Use store-release
327 	 * memory order. This synchronizes with the load-acquire in the
328 	 * service_run and rte_service_runstate_get functions.
329 	 */
330 	if (runstate)
331 		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_RUNNING,
332 			rte_memory_order_release);
333 	else
334 		rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_STOPPED,
335 			rte_memory_order_release);
336 
337 	rte_eal_trace_service_runstate_set(id, runstate);
338 	return 0;
339 }
340 
341 int32_t
342 rte_service_runstate_get(uint32_t id)
343 {
344 	struct rte_service_spec_impl *s;
345 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
346 
347 	/* comp_runstate and app_runstate act as the guard variables.
348 	 * Use load-acquire memory order. This synchronizes with
349 	 * store-release in service state set functions.
350 	 */
351 	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) ==
352 			RUNSTATE_RUNNING &&
353 	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) ==
354 			RUNSTATE_RUNNING) {
355 		int check_disabled = !(s->internal_flags &
356 			SERVICE_F_START_CHECK);
357 		int lcore_mapped = (rte_atomic_load_explicit(&s->num_mapped_cores,
358 			rte_memory_order_relaxed) > 0);
359 
360 		return (check_disabled | lcore_mapped);
361 	} else
362 		return 0;
363 
364 }
365 
366 static void
367 service_counter_add(RTE_ATOMIC(uint64_t) *counter, uint64_t operand)
368 {
369 	/* The lcore service worker thread is the only writer, and
370 	 * thus only a non-atomic load and an atomic store is needed,
371 	 * and not the more expensive atomic add.
372 	 */
373 	uint64_t value;
374 
375 	value = rte_atomic_load_explicit(counter, rte_memory_order_relaxed);
376 
377 	rte_atomic_store_explicit(counter, value + operand,
378 				  rte_memory_order_relaxed);
379 }
380 
381 static inline void
382 service_runner_do_callback(struct rte_service_spec_impl *s,
383 			   struct core_state *cs, uint32_t service_idx)
384 {
385 	rte_eal_trace_service_run_begin(service_idx, rte_lcore_id());
386 	void *userdata = s->spec.callback_userdata;
387 
388 	if (service_stats_enabled(s)) {
389 		uint64_t start = rte_rdtsc();
390 		int rc = s->spec.callback(userdata);
391 
392 		struct service_stats *service_stats =
393 			&cs->service_stats[service_idx];
394 
395 		service_counter_add(&service_stats->calls, 1);
396 
397 		if (rc == -EAGAIN)
398 			service_counter_add(&service_stats->idle_calls, 1);
399 		else if (rc != 0)
400 			service_counter_add(&service_stats->error_calls, 1);
401 
402 		if (likely(rc != -EAGAIN)) {
403 			uint64_t end = rte_rdtsc();
404 			uint64_t cycles = end - start;
405 
406 			service_counter_add(&cs->cycles, cycles);
407 			service_counter_add(&service_stats->cycles, cycles);
408 		}
409 	} else {
410 		s->spec.callback(userdata);
411 	}
412 	rte_eal_trace_service_run_end(service_idx, rte_lcore_id());
413 }
414 
415 
415 /* Expects that service 's' is valid. */
417 static int32_t
418 service_run(uint32_t i, struct core_state *cs, const uint64_t *mapped_services,
419 	    struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
420 {
421 	if (!s)
422 		return -EINVAL;
423 
424 	/* comp_runstate and app_runstate act as the guard variables.
425 	 * Use load-acquire memory order. This synchronizes with
426 	 * store-release in service state set functions.
427 	 */
428 	if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) !=
429 			RUNSTATE_RUNNING ||
430 	    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) !=
431 			RUNSTATE_RUNNING ||
432 	    !rte_bitset_test(mapped_services, i)) {
433 		rte_bitset_clear(cs->service_active_on_lcore, i);
434 		return -ENOEXEC;
435 	}
436 
437 	rte_bitset_set(cs->service_active_on_lcore, i);
438 
439 	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
440 		if (!rte_spinlock_trylock(&s->execute_lock))
441 			return -EBUSY;
442 
443 		service_runner_do_callback(s, cs, i);
444 		rte_spinlock_unlock(&s->execute_lock);
445 	} else
446 		service_runner_do_callback(s, cs, i);
447 
448 	return 0;
449 }
450 
451 int32_t
452 rte_service_may_be_active(uint32_t id)
453 {
454 	uint32_t ids[RTE_MAX_LCORE] = {0};
455 	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
456 	int i;
457 
458 	if (!service_valid(id))
459 		return -EINVAL;
460 
461 	for (i = 0; i < lcore_count; i++) {
462 		struct core_state *cs =
463 			RTE_LCORE_VAR_LCORE(ids[i], lcore_states);
464 
465 		if (rte_bitset_test(cs->service_active_on_lcore, id))
466 			return 1;
467 	}
468 
469 	return 0;
470 }
471 
472 int32_t
473 rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
474 {
475 	struct core_state *cs =	RTE_LCORE_VAR(lcore_states);
476 	struct rte_service_spec_impl *s;
477 
478 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
479 
480 	/* Increment num_mapped_cores to reflect that this core is
481 	 * now mapped and capable of running the service.
482 	 */
483 	rte_atomic_fetch_add_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);
484 
485 	RTE_BITSET_DECLARE(all_services, RTE_SERVICE_NUM_MAX);
486 	rte_bitset_set_all(all_services, RTE_SERVICE_NUM_MAX);
487 	int ret = service_run(id, cs, all_services, s, serialize_mt_unsafe);
488 
489 	rte_atomic_fetch_sub_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);
490 
491 	return ret;
492 }
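
/*
 * Illustrative sketch (editor's addition, not part of this file): an
 * application lcore that does not use dedicated service cores can drive a
 * service from its own poll loop. 'svc_id' is assumed to be a valid service
 * id whose runstates are set to running.
 *
 *	while (keep_running) {
 *		// serialize_mt_unsafe = 1: take the execute lock for services
 *		// that lack RTE_SERVICE_CAP_MT_SAFE
 *		int ret = rte_service_run_iter_on_app_lcore(svc_id, 1);
 *		if (ret == -EBUSY)
 *			continue;	// another lcore holds the execute lock
 *
 *		// ... other application work ...
 *	}
 */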
493 
494 static int32_t
495 service_runner_func(void *arg)
496 {
497 	RTE_SET_USED(arg);
498 	struct core_state *cs = RTE_LCORE_VAR(lcore_states);
499 
500 	rte_atomic_store_explicit(&cs->thread_active, 1, rte_memory_order_seq_cst);
501 
502 	/* runstate acts as the guard variable. Use load-acquire
503 	 * memory order here to synchronize with store-release
504 	 * in runstate update functions.
505 	 */
506 	while (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
507 			RUNSTATE_RUNNING) {
508 		ssize_t id;
509 
510 		RTE_BITSET_FOREACH_SET(id, cs->mapped_services, RTE_SERVICE_NUM_MAX) {
511 			/* return value ignored as no change to code flow */
512 			service_run(id, cs, cs->mapped_services, service_get(id), 1);
513 		}
514 
515 		rte_atomic_store_explicit(&cs->loops, cs->loops + 1, rte_memory_order_relaxed);
516 	}
517 
518 	/* Switch off this core for all services, to ensure that future
519 	 * calls to may_be_active() know this core is switched off.
520 	 */
521 	rte_bitset_clear_all(cs->service_active_on_lcore, RTE_SERVICE_NUM_MAX);
522 
523 	/* Use SEQ CST memory ordering to avoid any re-ordering around
524 	 * this store, ensuring that once this store is visible, the service
525 	 * lcore thread really is done in service cores code.
526 	 */
527 	rte_atomic_store_explicit(&cs->thread_active, 0, rte_memory_order_seq_cst);
528 	return 0;
529 }
530 
531 int32_t
532 rte_service_lcore_may_be_active(uint32_t lcore)
533 {
534 	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
535 
536 	if (lcore >= RTE_MAX_LCORE || !cs->is_service_core)
537 		return -EINVAL;
538 
539 	/* Load thread_active using ACQUIRE to avoid instructions dependent on
540 	 * the result being re-ordered before this load completes.
541 	 */
542 	return rte_atomic_load_explicit(&cs->thread_active,
543 			       rte_memory_order_acquire);
544 }
545 
546 int32_t
547 rte_service_lcore_count(void)
548 {
549 	int32_t count = 0;
550 
551 	unsigned int lcore_id;
552 	struct core_state *cs;
553 	RTE_LCORE_VAR_FOREACH(lcore_id, cs, lcore_states)
554 		count += cs->is_service_core;
555 
556 	return count;
557 }
558 
559 int32_t
560 rte_service_lcore_list(uint32_t array[], uint32_t n)
561 {
562 	uint32_t count = rte_service_lcore_count();
563 	if (count > n)
564 		return -ENOMEM;
565 
566 	if (!array)
567 		return -EINVAL;
568 
569 	uint32_t i;
570 	uint32_t idx = 0;
571 	for (i = 0; i < RTE_MAX_LCORE; i++) {
572 		struct core_state *cs =
573 			RTE_LCORE_VAR_LCORE(i, lcore_states);
574 		if (cs->is_service_core) {
575 			array[idx] = i;
576 			idx++;
577 		}
578 	}
579 
580 	return count;
581 }
582 
583 int32_t
584 rte_service_lcore_count_services(uint32_t lcore)
585 {
586 	if (lcore >= RTE_MAX_LCORE)
587 		return -EINVAL;
588 
589 	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
590 	if (!cs->is_service_core)
591 		return -ENOTSUP;
592 
593 	return rte_bitset_count_set(cs->mapped_services, RTE_SERVICE_NUM_MAX);
594 }
595 
596 int32_t
597 rte_service_start_with_defaults(void)
598 {
599 	/* create a default mapping from cores to services, then start the
600 	 * services to make them transparent to unaware applications.
601 	 */
602 	uint32_t i;
603 	int ret;
604 	uint32_t count = rte_service_get_count();
605 
606 	int32_t lcore_iter = 0;
607 	uint32_t ids[RTE_MAX_LCORE] = {0};
608 	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
609 
610 	if (lcore_count == 0)
611 		return -ENOTSUP;
612 
613 	for (i = 0; (int)i < lcore_count; i++)
614 		rte_service_lcore_start(ids[i]);
615 
616 	for (i = 0; i < count; i++) {
617 		/* do 1:1 core mapping here, with each service getting
618 		 * assigned a single core by default. Adding multiple services
619 		 * should multiplex to a single core, or 1:1 if there is the
620 		 * same number of services as service cores.
621 		 */
622 		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
623 		if (ret)
624 			return -ENODEV;
625 
626 		lcore_iter++;
627 		if (lcore_iter >= lcore_count)
628 			lcore_iter = 0;
629 
630 		ret = rte_service_runstate_set(i, 1);
631 		if (ret)
632 			return -ENOEXEC;
633 	}
634 
635 	return 0;
636 }
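
/*
 * Illustrative sketch (editor's addition, not part of this file): an
 * application that configures service cores through the EAL options and does
 * not want to manage the service-to-core mappings itself can rely on the
 * default distribution set up above.
 *
 *	if (rte_eal_init(argc, argv) < 0)
 *		rte_exit(EXIT_FAILURE, "EAL init failed\n");
 *
 *	// map every registered service to a service core and start them
 *	int ret = rte_service_start_with_defaults();
 *	if (ret == -ENOTSUP)
 *		printf("no service cores were configured\n");
 *	else if (ret < 0)
 *		printf("failed to start services: %d\n", ret);
 */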
637 
638 static int32_t
639 service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
640 {
641 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
642 
643 	/* validate ID, or return error value */
644 	if (!service_valid(sid) || lcore >= RTE_MAX_LCORE ||
645 			!cs->is_service_core)
646 		return -EINVAL;
647 
648 	if (set) {
649 		bool lcore_mapped = rte_bitset_test(cs->mapped_services, sid);
650 
651 		if (*set && !lcore_mapped) {
652 			rte_bitset_set(cs->mapped_services, sid);
653 			rte_atomic_fetch_add_explicit(&rte_services[sid].num_mapped_cores,
654 				1, rte_memory_order_relaxed);
655 		}
656 		if (!*set && lcore_mapped) {
657 			rte_bitset_clear(cs->mapped_services, sid);
658 			rte_atomic_fetch_sub_explicit(&rte_services[sid].num_mapped_cores,
659 				1, rte_memory_order_relaxed);
660 		}
661 	}
662 
663 	if (enabled)
664 		*enabled = rte_bitset_test(cs->mapped_services, sid);
665 
666 	return 0;
667 }
668 
669 int32_t
670 rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
671 {
672 	uint32_t on = enabled > 0;
673 	rte_eal_trace_service_map_lcore(id, lcore, enabled);
674 	return service_update(id, lcore, &on, 0);
675 }
676 
677 int32_t
678 rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
679 {
680 	uint32_t enabled;
681 	int ret = service_update(id, lcore, 0, &enabled);
682 	if (ret == 0)
683 		return enabled;
684 	return ret;
685 }
686 
687 static void
688 set_lcore_state(uint32_t lcore, int32_t state)
689 {
690 	/* mark core state in hugepage backed config */
691 	struct rte_config *cfg = rte_eal_get_configuration();
692 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
693 	cfg->lcore_role[lcore] = state;
694 
695 	/* mark state in process local lcore_config */
696 	lcore_config[lcore].core_role = state;
697 
698 	/* update per-lcore optimized state tracking */
699 	cs->is_service_core = (state == ROLE_SERVICE);
700 
701 	rte_eal_trace_service_lcore_state_change(lcore, state);
702 }
703 
704 int32_t
705 rte_service_lcore_reset_all(void)
706 {
707 	/* loop over cores, reset all mapped services */
708 	uint32_t i;
709 	for (i = 0; i < RTE_MAX_LCORE; i++) {
710 		struct core_state *cs =	RTE_LCORE_VAR_LCORE(i, lcore_states);
711 
712 		if (cs->is_service_core) {
713 			rte_bitset_clear_all(cs->mapped_services, RTE_SERVICE_NUM_MAX);
714 			set_lcore_state(i, ROLE_RTE);
715 			/* runstate acts as the guard variable. Use
716 			 * store-release memory order here to synchronize
717 			 * with load-acquire in runstate read functions.
718 			 */
719 			rte_atomic_store_explicit(&cs->runstate,
720 				RUNSTATE_STOPPED, rte_memory_order_release);
721 		}
722 	}
723 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
724 		rte_atomic_store_explicit(&rte_services[i].num_mapped_cores, 0,
725 			rte_memory_order_relaxed);
726 
727 	return 0;
728 }
729 
730 int32_t
731 rte_service_lcore_add(uint32_t lcore)
732 {
733 	if (lcore >= RTE_MAX_LCORE)
734 		return -EINVAL;
735 
736 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
737 	if (cs->is_service_core)
738 		return -EALREADY;
739 
740 	set_lcore_state(lcore, ROLE_SERVICE);
741 
742 	/* ensure that after adding a core the mask and state are defaults */
743 	rte_bitset_clear_all(cs->mapped_services, RTE_SERVICE_NUM_MAX);
744 	/* Use store-release memory order here to synchronize with
745 	 * load-acquire in runstate read functions.
746 	 */
747 	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_STOPPED,
748 		rte_memory_order_release);
749 
750 	return rte_eal_wait_lcore(lcore);
751 }
752 
753 int32_t
754 rte_service_lcore_del(uint32_t lcore)
755 {
756 	if (lcore >= RTE_MAX_LCORE)
757 		return -EINVAL;
758 
759 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
760 	if (!cs->is_service_core)
761 		return -EINVAL;
762 
763 	/* runstate acts as the guard variable. Use load-acquire
764 	 * memory order here to synchronize with store-release
765 	 * in runstate update functions.
766 	 */
767 	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) !=
768 			RUNSTATE_STOPPED)
769 		return -EBUSY;
770 
771 	set_lcore_state(lcore, ROLE_RTE);
772 
773 	rte_smp_wmb();
774 	return 0;
775 }
776 
777 int32_t
778 rte_service_lcore_start(uint32_t lcore)
779 {
780 	if (lcore >= RTE_MAX_LCORE)
781 		return -EINVAL;
782 
783 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
784 	if (!cs->is_service_core)
785 		return -EINVAL;
786 
787 	/* runstate acts as the guard variable. Use load-acquire
788 	 * memory order here to synchronize with store-release
789 	 * in runstate update functions.
790 	 */
791 	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
792 			RUNSTATE_RUNNING)
793 		return -EALREADY;
794 
795 	/* set core to run state first, and then launch; otherwise it will
796 	 * return immediately, as runstate keeps it in the service poll loop
797 	 */
798 	/* Use store-release memory order here to synchronize with
799 	 * load-acquire in runstate read functions.
800 	 */
801 	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_RUNNING, rte_memory_order_release);
802 
803 	rte_eal_trace_service_lcore_start(lcore);
804 
805 	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
806 	/* returns -EBUSY if the core is already launched, 0 on success */
807 	return ret;
808 }
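
/*
 * Illustrative sketch (editor's addition, not part of this file): explicit
 * control over which lcore runs which service, as an alternative to
 * rte_service_start_with_defaults(). 'svc_id' and 'service_lcore' are assumed
 * to be provided by the application.
 *
 *	int ret = rte_service_lcore_add(service_lcore);	// make it a service core
 *	if (ret < 0 && ret != -EALREADY)
 *		return ret;
 *	ret = rte_service_map_lcore_set(svc_id, service_lcore, 1);
 *	if (ret < 0)
 *		return ret;
 *	ret = rte_service_runstate_set(svc_id, 1);	// application allows the service to run
 *	if (ret < 0)
 *		return ret;
 *	return rte_service_lcore_start(service_lcore);	// launch service_runner_func()
 */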
809 
810 int32_t
811 rte_service_lcore_stop(uint32_t lcore)
812 {
813 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
814 
815 	if (lcore >= RTE_MAX_LCORE)
816 		return -EINVAL;
817 
818 	/* runstate acts as the guard variable. Use load-acquire
819 	 * memory order here to synchronize with store-release
820 	 * in runstate update functions.
821 	 */
822 	if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
823 			RUNSTATE_STOPPED)
824 		return -EALREADY;
825 
826 	uint32_t i;
827 
828 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
829 		bool enabled = rte_bitset_test(cs->mapped_services, i);
830 		bool service_running = rte_service_runstate_get(i);
831 		bool only_core = (1 ==
832 			rte_atomic_load_explicit(&rte_services[i].num_mapped_cores,
833 				rte_memory_order_relaxed));
834 
835 		/* if the core is mapped, and the service is running, and this
836 		 * is the only core that is mapped, the service would cease to
837 		 * run if this core stopped, so fail instead.
838 		 */
839 		if (enabled && service_running && only_core)
840 			return -EBUSY;
841 	}
842 
843 	/* Use store-release memory order here to synchronize with
844 	 * load-acquire in runstate read functions.
845 	 */
846 	rte_atomic_store_explicit(&cs->runstate, RUNSTATE_STOPPED,
847 		rte_memory_order_release);
848 
849 	rte_eal_trace_service_lcore_stop(lcore);
850 
851 	return 0;
852 }
853 
854 static uint64_t
855 lcore_attr_get_loops(unsigned int lcore)
856 {
857 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
858 
859 	return rte_atomic_load_explicit(&cs->loops, rte_memory_order_relaxed);
860 }
861 
862 static uint64_t
863 lcore_attr_get_cycles(unsigned int lcore)
864 {
865 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
866 
867 	return rte_atomic_load_explicit(&cs->cycles, rte_memory_order_relaxed);
868 }
869 
870 static uint64_t
871 lcore_attr_get_service_calls(uint32_t service_id, unsigned int lcore)
872 {
873 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
874 
875 	return rte_atomic_load_explicit(&cs->service_stats[service_id].calls,
876 		rte_memory_order_relaxed);
877 }
878 
879 static uint64_t
880 lcore_attr_get_service_idle_calls(uint32_t service_id, unsigned int lcore)
881 {
882 	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
883 
884 	return rte_atomic_load_explicit(&cs->service_stats[service_id].idle_calls,
885 		rte_memory_order_relaxed);
886 }
887 
888 static uint64_t
889 lcore_attr_get_service_error_calls(uint32_t service_id, unsigned int lcore)
890 {
891 	struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states);
892 
893 	return rte_atomic_load_explicit(&cs->service_stats[service_id].error_calls,
894 		rte_memory_order_relaxed);
895 }
896 
897 static uint64_t
898 lcore_attr_get_service_cycles(uint32_t service_id, unsigned int lcore)
899 {
900 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
901 
902 	return rte_atomic_load_explicit(&cs->service_stats[service_id].cycles,
903 		rte_memory_order_relaxed);
904 }
905 
906 typedef uint64_t (*lcore_attr_get_fun)(uint32_t service_id,
907 				       unsigned int lcore);
908 
909 static uint64_t
910 attr_get(uint32_t id, lcore_attr_get_fun lcore_attr_get)
911 {
912 	unsigned int lcore;
913 	uint64_t sum = 0;
914 
915 	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
916 		struct core_state *cs =
917 			RTE_LCORE_VAR_LCORE(lcore, lcore_states);
918 
919 		if (cs->is_service_core)
920 			sum += lcore_attr_get(id, lcore);
921 	}
922 
923 	return sum;
924 }
925 
926 static uint64_t
927 attr_get_service_calls(uint32_t service_id)
928 {
929 	return attr_get(service_id, lcore_attr_get_service_calls);
930 }
931 
932 static uint64_t
933 attr_get_service_idle_calls(uint32_t service_id)
934 {
935 	return attr_get(service_id, lcore_attr_get_service_idle_calls);
936 }
937 
938 static uint64_t
939 attr_get_service_error_calls(uint32_t service_id)
940 {
941 	return attr_get(service_id, lcore_attr_get_service_error_calls);
942 }
943 
944 static uint64_t
945 attr_get_service_cycles(uint32_t service_id)
946 {
947 	return attr_get(service_id, lcore_attr_get_service_cycles);
948 }
949 
950 int32_t
951 rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
952 {
953 	if (!service_valid(id))
954 		return -EINVAL;
955 
956 	if (!attr_value)
957 		return -EINVAL;
958 
959 	switch (attr_id) {
960 	case RTE_SERVICE_ATTR_CALL_COUNT:
961 		*attr_value = attr_get_service_calls(id);
962 		return 0;
963 	case RTE_SERVICE_ATTR_IDLE_CALL_COUNT:
964 		*attr_value = attr_get_service_idle_calls(id);
965 		return 0;
966 	case RTE_SERVICE_ATTR_ERROR_CALL_COUNT:
967 		*attr_value = attr_get_service_error_calls(id);
968 		return 0;
969 	case RTE_SERVICE_ATTR_CYCLES:
970 		*attr_value = attr_get_service_cycles(id);
971 		return 0;
972 	default:
973 		return -EINVAL;
974 	}
975 }
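
/*
 * Illustrative sketch (editor's addition, not part of this file): reading the
 * per-service statistics aggregated above. Statistics collection must be
 * enabled first, otherwise the counters remain zero. 'svc_id' is assumed
 * valid.
 *
 *	uint64_t calls, cycles;
 *
 *	rte_service_set_stats_enable(svc_id, 1);
 *	// ... let the service run for a while ...
 *	if (rte_service_attr_get(svc_id, RTE_SERVICE_ATTR_CALL_COUNT, &calls) == 0 &&
 *	    rte_service_attr_get(svc_id, RTE_SERVICE_ATTR_CYCLES, &cycles) == 0)
 *		printf("calls: %" PRIu64 ", cycles: %" PRIu64 "\n",
 *			calls, cycles);
 */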
976 
977 int32_t
978 rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
979 			   uint64_t *attr_value)
980 {
981 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
982 
983 	if (lcore >= RTE_MAX_LCORE || !attr_value)
984 		return -EINVAL;
985 
986 	if (!cs->is_service_core)
987 		return -ENOTSUP;
988 
989 	switch (attr_id) {
990 	case RTE_SERVICE_LCORE_ATTR_LOOPS:
991 		*attr_value = lcore_attr_get_loops(lcore);
992 		return 0;
993 	case RTE_SERVICE_LCORE_ATTR_CYCLES:
994 		*attr_value = lcore_attr_get_cycles(lcore);
995 		return 0;
996 	default:
997 		return -EINVAL;
998 	}
999 }
1000 
1001 int32_t
1002 rte_service_attr_reset_all(uint32_t id)
1003 {
1004 	unsigned int lcore;
1005 
1006 	if (!service_valid(id))
1007 		return -EINVAL;
1008 
1009 	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
1010 		struct core_state *cs =
1011 			RTE_LCORE_VAR_LCORE(lcore, lcore_states);
1012 
1013 		cs->service_stats[id] = (struct service_stats) {};
1014 	}
1015 
1016 	return 0;
1017 }
1018 
1019 int32_t
1020 rte_service_lcore_attr_reset_all(uint32_t lcore)
1021 {
1022 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
1023 
1024 	if (lcore >= RTE_MAX_LCORE)
1025 		return -EINVAL;
1026 
1027 	if (!cs->is_service_core)
1028 		return -ENOTSUP;
1029 
1030 	cs->loops = 0;
1031 
1032 	return 0;
1033 }
1034 
1035 static void
1036 service_dump_one(FILE *f, uint32_t id)
1037 {
1038 	struct rte_service_spec_impl *s;
1039 	uint64_t service_calls;
1040 	uint64_t service_cycles;
1041 
1042 	service_calls = attr_get_service_calls(id);
1043 	service_cycles = attr_get_service_cycles(id);
1044 
1045 	/* avoid divide by zero */
1046 	if (service_calls == 0)
1047 		service_calls = 1;
1048 
1049 	s = service_get(id);
1050 
1051 	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
1052 		PRIu64"\tavg: %"PRIu64"\n",
1053 		s->spec.name, service_stats_enabled(s), service_calls,
1054 		service_cycles, service_cycles / service_calls);
1055 }
1056 
1057 static void
1058 service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
1059 {
1060 	uint32_t i;
1061 	struct core_state *cs =	RTE_LCORE_VAR_LCORE(lcore, lcore_states);
1062 
1063 	fprintf(f, "%02d\t", lcore);
1064 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
1065 		if (!service_registered(i))
1066 			continue;
1067 		fprintf(f, "%"PRIu64"\t", cs->service_stats[i].calls);
1068 	}
1069 	fprintf(f, "\n");
1070 }
1071 
1072 int32_t
1073 rte_service_dump(FILE *f, uint32_t id)
1074 {
1075 	uint32_t i;
1076 	int print_one = (id != UINT32_MAX);
1077 
1078 	/* print only the specified service */
1079 	if (print_one) {
1080 		struct rte_service_spec_impl *s;
1081 		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
1082 		fprintf(f, "Service %s Summary\n", s->spec.name);
1083 		service_dump_one(f, id);
1084 		return 0;
1085 	}
1086 
1087 	/* print all services, as UINT32_MAX was passed as id */
1088 	fprintf(f, "Services Summary\n");
1089 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
1090 		if (!service_registered(i))
1091 			continue;
1092 		service_dump_one(f, i);
1093 	}
1094 
1095 	fprintf(f, "Service Cores Summary\n");
1096 	for (i = 0; i < RTE_MAX_LCORE; i++) {
1097 		if (lcore_config[i].core_role != ROLE_SERVICE)
1098 			continue;
1099 
1100 		service_dump_calls_per_lcore(f, i);
1101 	}
1102 
1103 	return 0;
1104 }
1105