/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_mcslock.h>
#include <rte_atomic.h>

#include "test.h"

/*
 * RTE MCS lock test
 * =================
 *
 * These tests are derived from the spin lock test cases.
 *
 * - The functional test takes the global lock and launches the
 *   test_mcslock_per_core() function on each core (except the master).
 *
 *   - The function takes the global lock, displays a message, then releases
 *     the global lock on each core.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */

RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);
RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);
RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);

rte_mcslock_t *p_ml;
rte_mcslock_t *p_ml_try;
rte_mcslock_t *p_ml_perf;

static unsigned int count;

static rte_atomic32_t synchro;

static int
test_mcslock_per_core(__attribute__((unused)) void *arg)
{
	/* Per-core me node. */
	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);

	rte_mcslock_lock(&p_ml, &ml_me);
	printf("MCS lock taken on core %u\n", rte_lcore_id());
	rte_mcslock_unlock(&p_ml, &ml_me);
	printf("MCS lock released on core %u\n", rte_lcore_id());

	return 0;
}

static uint64_t time_count[RTE_MAX_LCORE] = {0};

#define MAX_LOOP 1000000

static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
	volatile uint64_t lcount = 0;
	const int use_lock = *(int *)func_param;
	const unsigned int lcore = rte_lcore_id();

	/* Per-core me node. */
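	/* The MCS lock queues one such node per waiting lcore: each waiter
	 * spins only on a flag in its own node, so the lock is handed over
	 * in FIFO order and contention stays off a single shared cache line.
	 */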
	rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);

	/* Wait for the synchro flag before starting the measured loop. */
	while (rte_atomic32_read(&synchro) == 0)
		;

	begin = rte_get_timer_cycles();
	while (lcount < MAX_LOOP) {
		if (use_lock)
			rte_mcslock_lock(&p_ml_perf, &ml_perf_me);

		lcount++;
		if (use_lock)
			rte_mcslock_unlock(&p_ml_perf, &ml_perf_me);
	}
	time_diff = rte_get_timer_cycles() - begin;
	time_count[lcore] = time_diff * 1000000 / hz;
	return 0;
}

static int
test_mcslock_perf(void)
{
	unsigned int i;
	uint64_t total = 0;
	int lock = 0;
	const unsigned int lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	rte_atomic32_set(&synchro, 1);
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n",
			lcore, time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on single core...\n");
	lock = 1;
	rte_atomic32_set(&synchro, 1);
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n",
			lcore, time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on %u cores...\n", rte_lcore_count());

	rte_atomic32_set(&synchro, 0);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);

	/* Start synchro and launch the test on the master core too. */
	rte_atomic32_set(&synchro, 1);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] Cost Time = %"PRIu64" us\n",
				i, time_count[i]);
		total += time_count[i];
	}

	printf("Total Cost Time = %"PRIu64" us\n", total);

	return 0;
}

/*
 * Use rte_mcslock_trylock() to try to take an MCS lock object. If the lock
 * cannot be taken, the call returns immediately instead of blocking.
 */
static int
test_mcslock_try(__attribute__((unused)) void *arg)
{
	/* Per-core me nodes. */
	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);

	/* The master lcore holds ml_try, so trying to lock it from a slave
	 * lcore should fail.
	 */
	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0) {
		rte_mcslock_lock(&p_ml, &ml_me);
		count++;
		rte_mcslock_unlock(&p_ml, &ml_me);
	}

	return 0;
}

/*
 * Test rte_eal_get_lcore_state() in addition to MCS locks,
 * as we have "waiting" then "running" lcores.
 */
static int
test_mcslock(void)
{
	int ret = 0;
	int i;

	/* Define per-core me nodes. */
	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);

	/*
	 * Test MCS lock & unlock on each core.
	 */

	/* Slave cores should be waiting: print their state. */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
				(int) rte_eal_get_lcore_state(i));
	}

	rte_mcslock_lock(&p_ml, &ml_me);

	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_mcslock_per_core, NULL, i);
	}

	/* Slave cores should be busy: print their state. */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
				(int) rte_eal_get_lcore_state(i));
	}

	rte_mcslock_unlock(&p_ml, &ml_me);

	rte_eal_mp_wait_lcore();

	/*
	 * Test that try-locking an already locked object returns immediately.
	 * Lock the MCS lock object first, then launch all the slave lcores
	 * to trylock the same MCS lock object.
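	 * (rte_mcslock_trylock() is expected to return 1 when it takes the
	 * lock and 0, without blocking, when the lock is already held.)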
	 * All the slave lcores should give up try-locking the locked object
	 * and return immediately; each of them then increments the
	 * zero-initialized "count" by one.
	 * Checking that "count" finally equals the number of slave lcores
	 * verifies the behaviour of try-locking a locked MCS lock object.
	 */
	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0)
		return -1;

	count = 0;
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_mcslock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_mcslock_unlock(&p_ml_try, &ml_try_me);

	/* Test the is_locked API. */
	if (rte_mcslock_is_locked(p_ml)) {
		printf("mcslock is locked but it should not be\n");
		return -1;
	}

	/* Check that every slave lcore incremented "count" exactly once. */
	rte_mcslock_lock(&p_ml, &ml_me);
	if (count != (rte_lcore_count() - 1))
		ret = -1;
	rte_mcslock_unlock(&p_ml, &ml_me);

	/* MCS lock performance test. */
	if (test_mcslock_perf() < 0)
		return -1;

	return ret;
}

REGISTER_TEST_COMMAND(mcslock_autotest, test_mcslock);
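
/*
 * Usage note (illustrative, not part of the test): the command registered
 * above is run from the dpdk-test application, either interactively at the
 * "RTE>>" prompt:
 *
 *     RTE>> mcslock_autotest
 *
 * or non-interactively, e.g. by setting the DPDK_TEST environment variable
 * to "mcslock_autotest" before starting the binary. Exact binary paths and
 * EAL arguments depend on the build and target machine.
 */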