/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 */

#include "ocf/ocf_def.h"
#include "ocf_env.h"

#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/log.h"

/* Number of buffers for the mempool.
 * It needs to be a power of two minus 1 for better memory utilization.
 * The required count depends on the memory usage of OCF, which in turn
 * depends on the workload. It is a large number because OCF uses its
 * allocators for every request it sends and receives.
 *
 * The value of 16383 has been tested to work with 24 caches
 * running IO of io_size=512 and io_depth=512, which
 * should be more than enough for any real life scenario.
 * Increase this value if needed. It will result in
 * more memory being used initially on SPDK app start,
 * when compiled with OCF support.
 */
#define ENV_ALLOCATOR_NBUFS 16383

#define GET_ELEMENTS_COUNT(_limit) ((_limit) < 0 ? ENV_ALLOCATOR_NBUFS : (_limit))

/* Use a unique index for env allocators */
static env_atomic g_env_allocator_index = 0;

void *
env_allocator_new(env_allocator *allocator)
{
	void *mem = spdk_mempool_get(allocator->mempool);

	if (spdk_unlikely(!mem)) {
		return NULL;
	}

	if (allocator->zero) {
		memset(mem, 0, allocator->element_size);
	}

	return mem;
}

env_allocator *
env_allocator_create(uint32_t size, const char *name, bool zero)
{
	return env_allocator_create_extended(size, name, -1, zero);
}

env_allocator *
env_allocator_create_extended(uint32_t size, const char *name, int limit, bool zero)
{
	env_allocator *allocator;
	char qualified_name[OCF_ALLOCATOR_NAME_MAX] = {0};

	snprintf(qualified_name, OCF_ALLOCATOR_NAME_MAX, "ocf_env_%d:%s",
		 env_atomic_inc_return(&g_env_allocator_index), name);

	allocator = env_zalloc(sizeof(*allocator), ENV_MEM_NOIO);
	if (!allocator) {
		return NULL;
	}

	allocator->mempool = spdk_mempool_create(qualified_name,
			     GET_ELEMENTS_COUNT(limit), size,
			     SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
			     SPDK_ENV_SOCKET_ID_ANY);

	if (!allocator->mempool) {
		SPDK_ERRLOG("mempool creation failed\n");
		env_free(allocator);
		return NULL;
	}

	allocator->element_size = size;
	allocator->element_count = GET_ELEMENTS_COUNT(limit);
	allocator->zero = zero;

	return allocator;
}

void
env_allocator_del(env_allocator *allocator, void *item)
{
	spdk_mempool_put(allocator->mempool, item);
}

void
env_allocator_destroy(env_allocator *allocator)
{
	if (allocator) {
		if (allocator->element_count != spdk_mempool_count(allocator->mempool)) {
			SPDK_ERRLOG("Not all objects deallocated\n");
			assert(false);
		}

		spdk_mempool_free(allocator->mempool);
		env_free(allocator);
	}
}

/* *** CRC *** */

uint32_t
env_crc32(uint32_t crc, uint8_t const *message, size_t len)
{
	return spdk_crc32_ieee_update(message, len, crc);
}

/* EXECUTION CONTEXTS */
pthread_mutex_t *exec_context_mutex;

static void
__attribute__((constructor)) init_execution_context(void)
{
	unsigned count = env_get_execution_context_count();
	unsigned i;

	ENV_BUG_ON(count == 0);
	exec_context_mutex = malloc(count * sizeof(exec_context_mutex[0]));
	ENV_BUG_ON(exec_context_mutex == NULL);
	for (i = 0; i < count; i++) {
		ENV_BUG_ON(pthread_mutex_init(&exec_context_mutex[i], NULL));
	}
}

static void
__attribute__((destructor)) deinit_execution_context(void)
{
	unsigned count = env_get_execution_context_count();
	unsigned i;

	ENV_BUG_ON(count == 0);
	ENV_BUG_ON(exec_context_mutex == NULL);

	for (i = 0; i < count; i++) {
		ENV_BUG_ON(pthread_mutex_destroy(&exec_context_mutex[i]));
	}
	free(exec_context_mutex);
}

/* get_execution_context must ensure that, after the call finishes, the caller
 * will not get preempted from the current execution context. For the userspace
 * env we simulate this behavior by acquiring a per execution context mutex. As
 * a result the caller might actually get preempted, but no other thread will
 * execute in this context by the time the caller puts the current execution
 * ctx. */
unsigned
env_get_execution_context(void)
{
	unsigned cpu;

	cpu = sched_getcpu();
	cpu = (cpu == -1) ? 0 : cpu;

	ENV_BUG_ON(pthread_mutex_lock(&exec_context_mutex[cpu]));

	return cpu;
}

void
env_put_execution_context(unsigned ctx)
{
	pthread_mutex_unlock(&exec_context_mutex[ctx]);
}

unsigned
env_get_execution_context_count(void)
{
	int num = sysconf(_SC_NPROCESSORS_ONLN);

	return (num == -1) ? 0 : num;
}
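
/*
 * Illustrative sketch (documentation only, not part of the build): a minimal
 * allocator lifecycle as the env layer expects callers to use it. The struct
 * "example_req" and the allocator name "example" are hypothetical.
 *
 *	struct example_req { uint64_t addr; uint32_t bytes; };
 *
 *	env_allocator *alloc = env_allocator_create(sizeof(struct example_req),
 *						    "example", true);
 *	struct example_req *req = env_allocator_new(alloc);	// zeroed, since zero == true
 *
 *	// ... use req ...
 *
 *	env_allocator_del(alloc, req);		// every element must be returned
 *	env_allocator_destroy(alloc);		// before the allocator is destroyed
 */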