xref: /spdk/lib/env_ocf/ocf_env.c (revision 510f4c134a21b45ff3a5add9ebc6c6cf7e49aeab)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */
#include "ocf/ocf_def.h"
#include "ocf_env.h"

#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/log.h"

/* Number of buffers for the mempool.
 * Should be a power of two minus 1 for better memory utilization.
 * It depends on the memory usage of OCF, which in turn depends on
 * the workload. The number is large because OCF uses these
 * allocators for every request it sends and receives.
 *
 * The value of 16383 has been tested to work with 24 caches
 * running IO of io_size=512 and io_depth=512, which should be
 * more than enough for any real-life scenario. Increase this
 * value if needed; doing so will use more memory up front at
 * SPDK app start when compiled with OCF support.
 */
#define ENV_ALLOCATOR_NBUFS 16383

#define GET_ELEMENTS_COUNT(_limit) (_limit < 0 ? ENV_ALLOCATOR_NBUFS : _limit)
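
/*
 * Usage sketch (illustrative only, names below are hypothetical): how the
 * limit argument maps onto the mempool size. A negative limit falls back to
 * ENV_ALLOCATOR_NBUFS; a non-negative limit is used as-is.
 */
#if 0
struct example_ctx {
	uint64_t id;
};

static void
example_allocator_sizing(void)
{
	/* Negative limit: the pool gets ENV_ALLOCATOR_NBUFS (16383) buffers. */
	env_allocator *def = env_allocator_create(sizeof(struct example_ctx),
						  "example_ctx", true);

	/* Explicit limit: the pool is capped at 1024 buffers, not zeroed on get. */
	env_allocator *capped = env_allocator_create_extended(sizeof(struct example_ctx),
							      "example_ctx_capped",
							      1024, false);

	env_allocator_destroy(def);
	env_allocator_destroy(capped);
}
#endif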

/* Use unique index for env allocators */
static env_atomic g_env_allocator_index = 0;

void *
env_allocator_new(env_allocator *allocator)
{
	void *mem = spdk_mempool_get(allocator->mempool);

	if (spdk_unlikely(!mem)) {
		return NULL;
	}

	if (allocator->zero) {
		memset(mem, 0, allocator->element_size);
	}

	return mem;
}

env_allocator *
env_allocator_create(uint32_t size, const char *name, bool zero)
{
	return env_allocator_create_extended(size, name, -1, zero);
}

env_allocator *
env_allocator_create_extended(uint32_t size, const char *name, int limit, bool zero)
{
	env_allocator *allocator;
	char qualified_name[OCF_ALLOCATOR_NAME_MAX] = {0};

	snprintf(qualified_name, OCF_ALLOCATOR_NAME_MAX, "ocf_env_%d:%s",
		 env_atomic_inc_return(&g_env_allocator_index), name);

	allocator = calloc(1, sizeof(*allocator));
	if (!allocator) {
		return NULL;
	}

	allocator->mempool = spdk_mempool_create(qualified_name,
			     GET_ELEMENTS_COUNT(limit), size,
			     SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
			     SPDK_ENV_SOCKET_ID_ANY);

	if (!allocator->mempool) {
		SPDK_ERRLOG("mempool creation failed\n");
		free(allocator);
		return NULL;
	}

	allocator->element_size = size;
	allocator->element_count = GET_ELEMENTS_COUNT(limit);
	allocator->zero = zero;

	return allocator;
}

void
env_allocator_del(env_allocator *allocator, void *item)
{
	spdk_mempool_put(allocator->mempool, item);
}

void
env_allocator_destroy(env_allocator *allocator)
{
	if (allocator) {
		/* Any element still missing from the mempool was never freed back. */
		if (allocator->element_count - spdk_mempool_count(allocator->mempool)) {
			SPDK_ERRLOG("Not all objects deallocated\n");
			assert(false);
		}

		spdk_mempool_free(allocator->mempool);
		free(allocator);
	}
}
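
/*
 * Usage sketch (illustrative only, `struct example_item` is hypothetical):
 * the expected allocator lifecycle. Every buffer taken with
 * env_allocator_new() should be returned with env_allocator_del() before
 * env_allocator_destroy(), otherwise the element-count check above reports
 * "Not all objects deallocated" and asserts.
 */
#if 0
struct example_item {
	int value;
};

static void
example_allocator_lifecycle(void)
{
	env_allocator *alloc;
	struct example_item *item;

	alloc = env_allocator_create(sizeof(struct example_item), "example_item", true);
	if (!alloc) {
		return;
	}

	item = env_allocator_new(alloc);
	if (item) {
		item->value = 42;	/* buffer arrives zeroed because zero == true */
		env_allocator_del(alloc, item);
	}

	env_allocator_destroy(alloc);
}
#endif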

/* *** CRC *** */

uint32_t
env_crc32(uint32_t crc, uint8_t const *message, size_t len)
{
	return spdk_crc32_ieee_update(message, len, crc);
}
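
/*
 * Usage sketch (illustrative only): env_crc32() simply forwards to
 * spdk_crc32_ieee_update(), so a checksum can be built incrementally by
 * feeding the previous result back in as the seed. The initial seed of 0
 * below is arbitrary for illustration; callers should use whatever seed
 * their format expects.
 */
#if 0
static uint32_t
example_crc_over_two_chunks(const uint8_t *a, size_t a_len,
			    const uint8_t *b, size_t b_len)
{
	uint32_t crc = 0;

	crc = env_crc32(crc, a, a_len);
	crc = env_crc32(crc, b, b_len);

	return crc;
}
#endif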

/* EXECUTION CONTEXTS */
pthread_mutex_t *exec_context_mutex;

static void
__attribute__((constructor)) init_execution_context(void)
{
	unsigned count = env_get_execution_context_count();
	unsigned i;

	ENV_BUG_ON(count == 0);
	exec_context_mutex = malloc(count * sizeof(exec_context_mutex[0]));
	ENV_BUG_ON(exec_context_mutex == NULL);
	for (i = 0; i < count; i++) {
		ENV_BUG_ON(pthread_mutex_init(&exec_context_mutex[i], NULL));
	}
}

static void
__attribute__((destructor)) deinit_execution_context(void)
{
	unsigned count = env_get_execution_context_count();
	unsigned i;

	ENV_BUG_ON(count == 0);
	ENV_BUG_ON(exec_context_mutex == NULL);

	for (i = 0; i < count; i++) {
		ENV_BUG_ON(pthread_mutex_destroy(&exec_context_mutex[i]));
	}
	free(exec_context_mutex);
}

/* get_execution_context must ensure that, after the call finishes, the caller
 * will not get preempted from the current execution context. For the userspace
 * env we simulate this behavior by acquiring a per-execution-context mutex. As
 * a result the caller might actually get preempted, but no other thread will
 * execute in this context until the caller puts the current execution context. */
unsigned
env_get_execution_context(void)
{
	unsigned cpu;

	/* sched_getcpu() returns -1 on failure; fall back to execution context 0. */
	cpu = sched_getcpu();
	cpu = (cpu == -1) ? 0 : cpu;

	ENV_BUG_ON(pthread_mutex_lock(&exec_context_mutex[cpu]));

	return cpu;
}

void
env_put_execution_context(unsigned ctx)
{
	pthread_mutex_unlock(&exec_context_mutex[ctx]);
}
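
/*
 * Usage sketch (illustrative only): the get/put pattern described above.
 * Work done between the two calls is serialized against any other thread
 * that maps to the same execution context, since both would contend on the
 * same per-context mutex.
 */
#if 0
static void
example_per_context_work(void)
{
	unsigned ctx = env_get_execution_context();

	/* ... per-execution-context work goes here ... */

	env_put_execution_context(ctx);
}
#endif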

unsigned
env_get_execution_context_count(void)
{
	int num = sysconf(_SC_NPROCESSORS_ONLN);

	return (num == -1) ? 0 : num;
}
177