xref: /dpdk/examples/l2fwd-keepalive/shm.c (revision 13830b98b2bcf6a8a1aa8edde5132907c053861c)
13998e2a0SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
23998e2a0SBruce Richardson  * Copyright(c) 2016 Intel Corporation
37b2a704cSRemy Horton  */
47b2a704cSRemy Horton 
57b2a704cSRemy Horton #include <time.h>
67b2a704cSRemy Horton 
77b2a704cSRemy Horton #include <rte_common.h>
87b2a704cSRemy Horton #include <rte_keepalive.h>
97b2a704cSRemy Horton 
107b2a704cSRemy Horton #include "shm.h"
117b2a704cSRemy Horton 
rte_keepalive_shm_create(void)127b2a704cSRemy Horton struct rte_keepalive_shm *rte_keepalive_shm_create(void)
137b2a704cSRemy Horton {
147b2a704cSRemy Horton 	int fd;
157b2a704cSRemy Horton 	int idx_core;
167b2a704cSRemy Horton 	struct rte_keepalive_shm *ka_shm;
177b2a704cSRemy Horton 
187b2a704cSRemy Horton 	/* If any existing object is not unlinked, it makes it all too easy
197b2a704cSRemy Horton 	 * for clients to end up with stale shared memory blocks when
207b2a704cSRemy Horton 	 * restarted. Unlinking makes sure subsequent shm_open by clients
217b2a704cSRemy Horton 	 * will get the new block mapped below.
227b2a704cSRemy Horton 	 */
237b2a704cSRemy Horton 	if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
24*13830b98SStephen Hemminger 		RTE_LOG(NOTICE, L2FWD,
25*13830b98SStephen Hemminger 			"Warning: Error unlinking stale %s (%s)\n",
267b2a704cSRemy Horton 			RTE_KEEPALIVE_SHM_NAME, strerror(errno));
277b2a704cSRemy Horton 
287b2a704cSRemy Horton 	fd = shm_open(RTE_KEEPALIVE_SHM_NAME,
297b2a704cSRemy Horton 		O_CREAT | O_TRUNC | O_RDWR, 0666);
307b2a704cSRemy Horton 	if (fd < 0)
31*13830b98SStephen Hemminger 		RTE_LOG(ERR, L2FWD, "Failed to open %s as SHM (%s)\n",
32*13830b98SStephen Hemminger 			RTE_KEEPALIVE_SHM_NAME, strerror(errno));
337b2a704cSRemy Horton 	else if (ftruncate(fd, sizeof(struct rte_keepalive_shm)) != 0)
34*13830b98SStephen Hemminger 		RTE_LOG(ERR, L2FWD, "Failed to resize SHM (%s)\n", strerror(errno));
357b2a704cSRemy Horton 	else {
367b2a704cSRemy Horton 		ka_shm = (struct rte_keepalive_shm *) mmap(
377b2a704cSRemy Horton 			0, sizeof(struct rte_keepalive_shm),
387b2a704cSRemy Horton 			PROT_READ | PROT_WRITE,	MAP_SHARED, fd, 0);
397b2a704cSRemy Horton 		close(fd);
407b2a704cSRemy Horton 		if (ka_shm == MAP_FAILED)
41*13830b98SStephen Hemminger 			RTE_LOG(ERR, L2FWD, "Failed to mmap SHM (%s)\n", strerror(errno));
427b2a704cSRemy Horton 		else {
437b2a704cSRemy Horton 			memset(ka_shm, 0, sizeof(struct rte_keepalive_shm));
447b2a704cSRemy Horton 
457b2a704cSRemy Horton 			/* Initialize the semaphores for IPC/SHM use */
467b2a704cSRemy Horton 			if (sem_init(&ka_shm->core_died, 1, 0) != 0) {
47*13830b98SStephen Hemminger 				RTE_LOG(ERR, L2FWD, "Failed to setup SHM semaphore (%s)\n",
487b2a704cSRemy Horton 					strerror(errno));
499f2bb5ceSRemy Horton 				munmap(ka_shm,
509f2bb5ceSRemy Horton 					sizeof(struct rte_keepalive_shm));
517b2a704cSRemy Horton 				return NULL;
527b2a704cSRemy Horton 			}
537b2a704cSRemy Horton 
547b2a704cSRemy Horton 			/* Set all cores to 'not present' */
557b2a704cSRemy Horton 			for (idx_core = 0;
567b2a704cSRemy Horton 					idx_core < RTE_KEEPALIVE_MAXCORES;
577b2a704cSRemy Horton 					idx_core++) {
587b2a704cSRemy Horton 				ka_shm->core_state[idx_core] =
597b2a704cSRemy Horton 					RTE_KA_STATE_UNUSED;
607b2a704cSRemy Horton 				ka_shm->core_last_seen_times[idx_core] = 0;
617b2a704cSRemy Horton 			}
627b2a704cSRemy Horton 
637b2a704cSRemy Horton 			return ka_shm;
647b2a704cSRemy Horton 		}
657b2a704cSRemy Horton 	}
667b2a704cSRemy Horton return NULL;
677b2a704cSRemy Horton }
687b2a704cSRemy Horton 
/**
 * Record a core's relayed keepalive state into the shared-memory block.
 *
 * Updates the per-core state and last-seen timestamp. When a core is
 * reported dead, the shared "core died" semaphore is posted so a monitoring
 * agent can react, capped so the count never grows unbounded if no agent
 * is draining it.
 */
void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
	const int id_core, const enum rte_keepalive_state core_state,
	__rte_unused uint64_t last_alive)
{
	int sem_value;

	shm->core_state[id_core] = core_state;
	shm->core_last_seen_times[id_core] = last_alive;

	if (core_state != RTE_KEEPALIVE_SHM_DEAD)
		return;

	/* Core has died: signal ka_agent via the semaphore, but cap the
	 * number of pending posts in case no agent is running.
	 */
	if (sem_getvalue(&shm->core_died, &sem_value) == -1) {
		RTE_LOG(ERR, L2FWD, "Semaphore check failed(%s)\n",
			strerror(errno));
		return;
	}

	if (sem_value > 1)
		return;

	if (sem_post(&shm->core_died) != 0)
		RTE_LOG(ERR, L2FWD, "Failed to increment semaphore (%s)\n",
			strerror(errno));
}
9793543923SRemy Horton 
rte_keepalive_shm_cleanup(struct rte_keepalive_shm * ka_shm)9893543923SRemy Horton void rte_keepalive_shm_cleanup(struct rte_keepalive_shm *ka_shm)
9993543923SRemy Horton {
10093543923SRemy Horton 	if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
101*13830b98SStephen Hemminger 		RTE_LOG(NOTICE, L2FWD, "Warning: Error unlinking  %s (%s)\n",
10293543923SRemy Horton 			RTE_KEEPALIVE_SHM_NAME, strerror(errno));
10393543923SRemy Horton 
10493543923SRemy Horton 	if (ka_shm && munmap(ka_shm, sizeof(struct rte_keepalive_shm)) != 0)
105*13830b98SStephen Hemminger 		RTE_LOG(ERR, L2FWD, "Warning: munmap() failed: %s\n",
106*13830b98SStephen Hemminger 			strerror(errno));
10793543923SRemy Horton }
108