/* xref: /dpdk/lib/eal/common/rte_service.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>

#include <rte_compat.h>
#include <rte_service.h>
#include <rte_service_component.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* spin lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_spinlock_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	/* Indicates how many cores the service is mapped to run on.
	 * It does not indicate the number of cores the service is running
	 * on currently.
	 */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
	/* bitmap of service IDs mapped to run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t thread_active; /* indicates when thread is in service_run() */
	uint8_t is_service_core; /* set if core is currently a service core */
	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
	uint64_t loops;
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
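
/* Illustrative note (not part of the API): a service with id 'sid' is
 * considered mapped to an lcore when its bit is set in that core's
 * service_mask, i.e. (cs->service_mask & (UINT64_C(1) << sid)) != 0.
 * service_update() and service_run() below use exactly this bit test.
 */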

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t
rte_service_init(void)
{
	if (rte_service_library_initialized) {
		RTE_LOG(NOTICE, EAL,
			"service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		RTE_LOG(ERR, EAL, "error allocating rte services array\n");
		goto fail_mem;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		RTE_LOG(ERR, EAL, "error allocating core states array\n");
		goto fail_mem;
	}

	int i;
	int count = 0;
	struct rte_config *cfg = rte_eal_get_configuration();
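	/* Promote lcores already marked ROLE_SERVICE by EAL argument parsing
	 * (the service coremask/corelist options) to service cores, skipping
	 * the main lcore so it remains available to the application.
	 */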
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->main_lcore)
				continue;
			rte_service_lcore_add(i);
			count++;
		}
	}

	rte_service_library_initialized = 1;
	return 0;
fail_mem:
	rte_free(rte_services);
	rte_free(lcore_states);
	return -ENOMEM;
}

void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	rte_service_lcore_reset_all();
	rte_eal_mp_wait_lcore();

	rte_free(rte_services);
	rte_free(lcore_states);

	rte_service_library_initialized = 0;
}

/* Returns 1 if the service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

static struct rte_service_spec_impl *
service_get(uint32_t id)
{
	return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
		return retval;                                          \
	service = &rte_services[id];                                    \
} while (0)

/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected for the service.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
			       uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}
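
/* Usage sketch (illustrative only; 'my_service_cb' and 'my_state' are
 * hypothetical component symbols): a component such as a PMD registers its
 * service and then marks the component run-state as running:
 *
 *	static int32_t my_service_cb(void *userdata);
 *
 *	struct rte_service_spec spec = {
 *		.name = "my_component_service",
 *		.callback = my_service_cb,
 *		.callback_userdata = my_state,
 *		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
 *	};
 *	uint32_t sid;
 *	if (rte_service_component_register(&spec, &sid) == 0)
 *		rte_service_component_runstate_set(sid, 1);
 */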

int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with the load-acquire in the
	 * service_run and rte_service_runstate_get functions.
	 */
	if (runstate)
		__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* app_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with the load-acquire in the
	 * service_run and rte_service_runstate_get functions.
	 */
	if (runstate)
		__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING &&
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		int check_disabled = !(s->internal_flags &
			SERVICE_F_START_CHECK);
		int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
			__ATOMIC_RELAXED) > 0);

		return (check_disabled | lcore_mapped);
	} else
		return 0;

}
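
/* Note on the check above: with SERVICE_F_START_CHECK set (the default at
 * registration), a service only reports itself as running once at least one
 * lcore is mapped to it. Disabling the check via
 * rte_service_set_runstate_mapped_check(id, 0) makes the two runstates alone
 * sufficient.
 */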

static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
			   struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}


/* Expects that the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
	    struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    !(service_mask & (UINT64_C(1) << i))) {
		cs->service_active_on_lcore[i] = 0;
		return -ENOEXEC;
	}

	cs->service_active_on_lcore[i] = 1;

	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
		if (!rte_spinlock_trylock(&s->execute_lock))
			return -EBUSY;

		service_runner_do_callback(s, cs, i);
		rte_spinlock_unlock(&s->execute_lock);
	} else
		service_runner_do_callback(s, cs, i);

	return 0;
}
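
/* Note: the trylock above means an MT-unsafe service never blocks a caller;
 * if another core already holds the execute_lock, service_run() returns
 * -EBUSY instead of spinning, and the service is simply attempted again on
 * a later iteration of the calling poll loop.
 */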

int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		if (lcore_states[ids[i]].service_active_on_lcore[id])
			return 1;
	}

	return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s;

	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* Increment num_mapped_cores to reflect that this core is
	 * now mapped and capable of running the service.
	 */
	__atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

	__atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	return ret;
}
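
/* Usage sketch (illustrative; 'keep_running' and the service name are
 * hypothetical): an application lcore with no dedicated service cores can
 * drive a service from its own poll loop:
 *
 *	uint32_t sid;
 *	if (rte_service_get_by_name("my_component_service", &sid) == 0)
 *		while (keep_running)
 *			rte_service_run_iter_on_app_lcore(sid, 1);
 *
 * Passing serialize_mt_unsafe as 1 takes the execute_lock for MT-unsafe
 * services, so multiple application lcores may call this concurrently.
 */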

static int32_t
service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	__atomic_store_n(&cs->thread_active, 1, __ATOMIC_SEQ_CST);

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			if (!service_valid(i))
				continue;
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask, service_get(i), 1);
		}

		cs->loops++;
	}

	/* Use SEQ CST memory ordering to avoid any re-ordering around
	 * this store, ensuring that once this store is visible, the service
	 * lcore thread really is done running the service cores code.
	 */
	__atomic_store_n(&cs->thread_active, 0, __ATOMIC_SEQ_CST);
	return 0;
}
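
/* Note: service_mask is snapshotted once per iteration of the loop above,
 * so a mapping change made with rte_service_map_lcore_set() from another
 * lcore takes effect on the next pass of the poll loop rather than
 * mid-iteration.
 */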

int32_t
rte_service_lcore_may_be_active(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	/* Load thread_active using ACQUIRE to avoid instructions dependent on
	 * the result being re-ordered before this load completes.
	 */
	return __atomic_load_n(&lcore_states[lcore].thread_active,
			       __ATOMIC_ACQUIRE);
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}
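
/* Usage sketch (illustrative): enumerate the current service cores:
 *
 *	uint32_t ids[RTE_MAX_LCORE];
 *	int32_t n = rte_service_lcore_list(ids, RTE_MAX_LCORE);
 *	int32_t i;
 *
 *	for (i = 0; i < n; i++)
 *		printf("service core: %u\n", ids[i]);
 */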

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* Do a 1:1 core mapping here, with each service assigned a
		 * single core by default. If there are more services than
		 * service cores, multiple services are multiplexed onto the
		 * same core; the mapping stays 1:1 when the number of
		 * services equals the number of service cores.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
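
/* Illustrative equivalent of the defaults above, done manually by an
 * application for one service 'sid' on one service lcore 'lcore_id' (both
 * hypothetical values obtained elsewhere):
 *
 *	rte_service_lcore_add(lcore_id);
 *	rte_service_lcore_start(lcore_id);
 *	rte_service_map_lcore_set(sid, lcore_id, 1);
 *	rte_service_runstate_set(sid, 1);
 */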

static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
	/* validate ID, or return error value */
	if (sid >= RTE_SERVICE_NUM_MAX || !service_valid(sid) ||
	    lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			__atomic_add_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			__atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	uint32_t on = enabled > 0;
	return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	uint32_t enabled;
	int ret = service_update(id, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			/* runstate acts as the guard variable. Use
			 * store-release memory order here to synchronize
			 * with load-acquire in runstate read functions.
			 */
			__atomic_store_n(&lcore_states[i].runstate,
				RUNSTATE_STOPPED, __ATOMIC_RELEASE);
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		__atomic_store_n(&rte_services[i].num_mapped_cores, 0,
			__ATOMIC_RELAXED);

	return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	rte_smp_wmb();
	return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING)
		return -EALREADY;

	/* Set the core to the running state first and only then launch it,
	 * otherwise the launched thread would return immediately, since
	 * runstate is what keeps it in the service poll loop.
	 */
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);

	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = service_mask & (UINT64_C(1) << i);
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			__atomic_load_n(&rte_services[i].num_mapped_cores,
				__ATOMIC_RELAXED));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return 0;
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = s->cycles_spent;
		return 0;
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = s->calls;
		return 0;
	default:
		return -EINVAL;
	}
}
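
/* Usage sketch (illustrative): read and then reset the per-service
 * statistics; the counters are only maintained after
 * rte_service_set_stats_enable(id, 1):
 *
 *	uint64_t calls, cycles;
 *
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES, &cycles);
 *	rte_service_attr_reset_all(id);
 */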

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
			   uint64_t *attr_value)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = cs->loops;
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	s->cycles_spent = 0;
	s->calls = 0;
	return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}

static void
service_dump_one(FILE *f, struct rte_service_spec_impl *s)
{
	/* avoid divide by zero */
	int calls = 1;

	if (s->calls != 0)
		calls = s->calls;
	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
	}
	fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		service_dump_one(f, s);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		service_dump_one(f, &rte_services[i]);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		service_dump_calls_per_lcore(f, i);
	}

	return 0;
}
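
/* Usage sketch (illustrative): dump statistics for every registered service
 * and the per-lcore call counts by passing UINT32_MAX as the id, or for a
 * single service by passing its id:
 *
 *	rte_service_dump(stdout, UINT32_MAX);
 */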
925