/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 * Copyright(c) 2024 Ericsson AB
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_cycles.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_random.h>
#include "test.h"

static unsigned int
get_worker_lcore(void)
{
	unsigned int lcore_id = rte_get_next_lcore(-1, 1, 0);

	/* avoid checkers (like Coverity) false positives */
	RTE_VERIFY(lcore_id < RTE_MAX_LCORE);

	return lcore_id;
}

#define GEN_TEST_BIT_ACCESS(test_name, set_fun, clear_fun, assign_fun, flip_fun, test_fun, size, \
		mod) \
	static int \
	test_name(void) \
	{ \
		uint ## size ## _t reference = (uint ## size ## _t)rte_rand(); \
		unsigned int bit_nr; \
		mod uint ## size ## _t word = (uint ## size ## _t)rte_rand(); \
		for (bit_nr = 0; bit_nr < size; bit_nr++) { \
			bool reference_bit = (reference >> bit_nr) & 1; \
			bool assign = rte_rand() & 1; \
			if (assign) { \
				assign_fun(&word, bit_nr, reference_bit); \
			} else { \
				if (reference_bit) \
					set_fun(&word, bit_nr); \
				else \
					clear_fun(&word, bit_nr); \
			} \
			TEST_ASSERT(test_fun(&word, bit_nr) == reference_bit, \
				"Bit %d had unexpected value", bit_nr); \
			flip_fun(&word, bit_nr); \
			TEST_ASSERT(test_fun(&word, bit_nr) != reference_bit, \
				"Bit %d had unflipped value", bit_nr); \
			flip_fun(&word, bit_nr); \
			const mod uint ## size ## _t *const_ptr = &word; \
			TEST_ASSERT(test_fun(const_ptr, bit_nr) == reference_bit, \
				"Bit %d had unexpected value", bit_nr); \
		} \
		for (bit_nr = 0; bit_nr < size; bit_nr++) { \
			bool reference_bit = (reference >> bit_nr) & 1; \
			TEST_ASSERT(test_fun(&word, bit_nr) == reference_bit, \
				"Bit %d had unexpected value", bit_nr); \
		} \
		TEST_ASSERT(reference == word, "Word had unexpected value"); \
		return TEST_SUCCESS; \
	}

GEN_TEST_BIT_ACCESS(test_bit_access32, rte_bit_set, rte_bit_clear, rte_bit_assign, rte_bit_flip,
	rte_bit_test, 32,)

GEN_TEST_BIT_ACCESS(test_bit_access64, rte_bit_set, rte_bit_clear, rte_bit_assign, rte_bit_flip,
	rte_bit_test, 64,)

GEN_TEST_BIT_ACCESS(test_bit_v_access32, rte_bit_set, rte_bit_clear, rte_bit_assign, rte_bit_flip,
	rte_bit_test, 32, volatile)

GEN_TEST_BIT_ACCESS(test_bit_v_access64, rte_bit_set, rte_bit_clear, rte_bit_assign, rte_bit_flip,
	rte_bit_test, 64, volatile)

#define bit_atomic_set(addr, nr) \
	rte_bit_atomic_set(addr, nr, rte_memory_order_relaxed)

#define bit_atomic_clear(addr, nr) \
	rte_bit_atomic_clear(addr, nr, rte_memory_order_relaxed)

#define bit_atomic_assign(addr, nr, value) \
	rte_bit_atomic_assign(addr, nr, value, rte_memory_order_relaxed)

#define bit_atomic_flip(addr, nr) \
	rte_bit_atomic_flip(addr, nr, rte_memory_order_relaxed)

#define bit_atomic_test(addr, nr) \
	rte_bit_atomic_test(addr, nr, rte_memory_order_relaxed)

GEN_TEST_BIT_ACCESS(test_bit_atomic_access32, bit_atomic_set, bit_atomic_clear, bit_atomic_assign,
	bit_atomic_flip, bit_atomic_test, 32,)

GEN_TEST_BIT_ACCESS(test_bit_atomic_access64, bit_atomic_set, bit_atomic_clear, bit_atomic_assign,
	bit_atomic_flip, bit_atomic_test, 64,)

GEN_TEST_BIT_ACCESS(test_bit_atomic_v_access32, bit_atomic_set, bit_atomic_clear, bit_atomic_assign,
	bit_atomic_flip, bit_atomic_test, 32, volatile)

GEN_TEST_BIT_ACCESS(test_bit_atomic_v_access64, bit_atomic_set, bit_atomic_clear, bit_atomic_assign,
	bit_atomic_flip, bit_atomic_test, 64, volatile)
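
/*
 * The tests below exercise the atomic bit operations from two lcores in
 * parallel. Each lcore loops for roughly PARALLEL_TEST_RUNTIME seconds on a
 * shared word: the main lcore runs one instance of the per-lcore routine
 * while a worker lcore, launched with rte_eal_remote_launch(), runs another.
 * In the assign test each lcore owns a distinct bit of the word, so it must
 * always read back the value it last wrote, regardless of which of the
 * set/clear/assign or test-and-modify variants it used to write it.
 */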

#define PARALLEL_TEST_RUNTIME 0.25

#define GEN_TEST_BIT_PARALLEL_ASSIGN(size) \
	struct parallel_access_lcore ## size \
	{ \
		unsigned int bit; \
		uint ## size ##_t *word; \
		bool failed; \
	}; \
	static int \
	run_parallel_assign ## size(void *arg) \
	{ \
		struct parallel_access_lcore ## size *lcore = arg; \
		uint64_t deadline = rte_get_timer_cycles() + PARALLEL_TEST_RUNTIME * rte_get_timer_hz(); \
		bool value = false; \
		do { \
			bool new_value = rte_rand() & 1; \
			bool use_test_and_modify = rte_rand() & 1; \
			bool use_assign = rte_rand() & 1; \
			if (rte_bit_atomic_test(lcore->word, lcore->bit, \
					rte_memory_order_relaxed) != value) { \
				lcore->failed = true; \
				break; \
			} \
			if (use_test_and_modify) { \
				bool old_value; \
				if (use_assign) { \
					old_value = rte_bit_atomic_test_and_assign(lcore->word, \
						lcore->bit, new_value, rte_memory_order_relaxed); \
				} else { \
					old_value = new_value ? \
						rte_bit_atomic_test_and_set(lcore->word, lcore->bit, \
							rte_memory_order_relaxed) : \
						rte_bit_atomic_test_and_clear(lcore->word, lcore->bit, \
							rte_memory_order_relaxed); \
				} \
				if (old_value != value) { \
					lcore->failed = true; \
					break; \
				} \
			} else { \
				if (use_assign) { \
					rte_bit_atomic_assign(lcore->word, lcore->bit, new_value, \
						rte_memory_order_relaxed); \
				} else { \
					if (new_value) \
						rte_bit_atomic_set(lcore->word, lcore->bit, \
							rte_memory_order_relaxed); \
					else \
						rte_bit_atomic_clear(lcore->word, lcore->bit, \
							rte_memory_order_relaxed); \
				} \
			} \
			value = new_value; \
		} while (rte_get_timer_cycles() < deadline); \
		return 0; \
	} \
	static int \
	test_bit_atomic_parallel_assign ## size(void) \
	{ \
		unsigned int worker_lcore_id; \
		uint ## size ## _t word = 0; \
		struct parallel_access_lcore ## size lmain = { .word = &word }; \
		struct parallel_access_lcore ## size lworker = { .word = &word }; \
		if (rte_lcore_count() < 2) { \
			printf("Need multiple cores to run parallel test.\n"); \
			return TEST_SKIPPED; \
		} \
		worker_lcore_id = get_worker_lcore(); \
		lmain.bit = rte_rand_max(size); \
		do { \
			lworker.bit = rte_rand_max(size); \
		} while (lworker.bit == lmain.bit); \
		int rc = rte_eal_remote_launch(run_parallel_assign ## size, &lworker, worker_lcore_id); \
		TEST_ASSERT(rc == 0, "Worker thread launch failed"); \
		run_parallel_assign ## size(&lmain); \
		rte_eal_mp_wait_lcore(); \
		TEST_ASSERT(!lmain.failed, "Main lcore atomic access failed"); \
		TEST_ASSERT(!lworker.failed, "Worker lcore atomic access failed"); \
		return TEST_SUCCESS; \
	}

GEN_TEST_BIT_PARALLEL_ASSIGN(32)
GEN_TEST_BIT_PARALLEL_ASSIGN(64)
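
/*
 * In the test below, both lcores perform atomic test-and-modify operations
 * on the same bit. An operation that changes the bit's value counts as one
 * flip, so the bit's final value must equal the parity of the combined flip
 * count, and all other bits of the word must remain zero.
 */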

#define GEN_TEST_BIT_PARALLEL_TEST_AND_MODIFY(size) \
	struct parallel_test_and_set_lcore ## size \
	{ \
		uint ## size ##_t *word; \
		unsigned int bit; \
		uint64_t flips; \
	}; \
	static int \
	run_parallel_test_and_modify ## size(void *arg) \
	{ \
		struct parallel_test_and_set_lcore ## size *lcore = arg; \
		uint64_t deadline = rte_get_timer_cycles() + PARALLEL_TEST_RUNTIME * rte_get_timer_hz(); \
		do { \
			bool old_value; \
			bool new_value = rte_rand() & 1; \
			bool use_assign = rte_rand() & 1; \
			if (use_assign) \
				old_value = rte_bit_atomic_test_and_assign(lcore->word, lcore->bit, \
					new_value, rte_memory_order_relaxed); \
			else \
				old_value = new_value ? \
					rte_bit_atomic_test_and_set(lcore->word, lcore->bit, \
						rte_memory_order_relaxed) : \
					rte_bit_atomic_test_and_clear(lcore->word, lcore->bit, \
						rte_memory_order_relaxed); \
			if (old_value != new_value) \
				lcore->flips++; \
		} while (rte_get_timer_cycles() < deadline); \
		return 0; \
	} \
	static int \
	test_bit_atomic_parallel_test_and_modify ## size(void) \
	{ \
		unsigned int worker_lcore_id; \
		uint ## size ## _t word = 0; \
		unsigned int bit = rte_rand_max(size); \
		struct parallel_test_and_set_lcore ## size lmain = { .word = &word, .bit = bit }; \
		struct parallel_test_and_set_lcore ## size lworker = { .word = &word, .bit = bit }; \
		if (rte_lcore_count() < 2) { \
			printf("Need multiple cores to run parallel test.\n"); \
			return TEST_SKIPPED; \
		} \
		worker_lcore_id = get_worker_lcore(); \
		int rc = rte_eal_remote_launch(run_parallel_test_and_modify ## size, &lworker, \
			worker_lcore_id); \
		TEST_ASSERT(rc == 0, "Worker thread launch failed"); \
		run_parallel_test_and_modify ## size(&lmain); \
		rte_eal_mp_wait_lcore(); \
		uint64_t total_flips = lmain.flips + lworker.flips; \
		bool expected_value = total_flips % 2; \
		TEST_ASSERT(expected_value == rte_bit_test(&word, bit), \
			"After %"PRIu64" flips, the bit value should be %d", total_flips, expected_value); \
		uint ## size ## _t expected_word = 0; \
		rte_bit_assign(&expected_word, bit, expected_value); \
		TEST_ASSERT(expected_word == word, "Untouched bits have changed value"); \
		return TEST_SUCCESS; \
	}

GEN_TEST_BIT_PARALLEL_TEST_AND_MODIFY(32)
GEN_TEST_BIT_PARALLEL_TEST_AND_MODIFY(64)
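
/*
 * In the test below, both lcores repeatedly flip the same bit with
 * rte_bit_atomic_flip() and count their flips. As above, the bit's final
 * value must equal the parity of the combined flip count.
 */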

#define GEN_TEST_BIT_PARALLEL_FLIP(size) \
	struct parallel_flip_lcore ## size \
	{ \
		uint ## size ##_t *word; \
		unsigned int bit; \
		uint64_t flips; \
	}; \
	static int \
	run_parallel_flip ## size(void *arg) \
	{ \
		struct parallel_flip_lcore ## size *lcore = arg; \
		uint64_t deadline = rte_get_timer_cycles() + PARALLEL_TEST_RUNTIME * rte_get_timer_hz(); \
		do { \
			rte_bit_atomic_flip(lcore->word, lcore->bit, rte_memory_order_relaxed); \
			lcore->flips++; \
		} while (rte_get_timer_cycles() < deadline); \
		return 0; \
	} \
	static int \
	test_bit_atomic_parallel_flip ## size(void) \
	{ \
		unsigned int worker_lcore_id; \
		uint ## size ## _t word = 0; \
		unsigned int bit = rte_rand_max(size); \
		struct parallel_flip_lcore ## size lmain = { .word = &word, .bit = bit }; \
		struct parallel_flip_lcore ## size lworker = { .word = &word, .bit = bit }; \
		if (rte_lcore_count() < 2) { \
			printf("Need multiple cores to run parallel test.\n"); \
			return TEST_SKIPPED; \
		} \
		worker_lcore_id = get_worker_lcore(); \
		int rc = rte_eal_remote_launch(run_parallel_flip ## size, &lworker, worker_lcore_id); \
		TEST_ASSERT(rc == 0, "Worker thread launch failed"); \
		run_parallel_flip ## size(&lmain); \
		rte_eal_mp_wait_lcore(); \
		uint64_t total_flips = lmain.flips + lworker.flips; \
		bool expected_value = total_flips % 2; \
		TEST_ASSERT(expected_value == rte_bit_test(&word, bit), \
			"After %"PRIu64" flips, the bit value should be %d", total_flips, expected_value); \
		uint ## size ## _t expected_word = 0; \
		rte_bit_assign(&expected_word, bit, expected_value); \
		TEST_ASSERT(expected_word == word, "Untouched bits have changed value"); \
		return TEST_SUCCESS; \
	}

GEN_TEST_BIT_PARALLEL_FLIP(32)
GEN_TEST_BIT_PARALLEL_FLIP(64)

static uint32_t val32;
static uint64_t val64;

#define MAX_BITS_32 32
#define MAX_BITS_64 64

/*
 * Bitops functions
 * ================
 *
 * - The main test function performs several subtests, each checking the
 *   relaxed bit operations on a single core.
 * - "test_bit_relaxed_set" sets each bit of val32/val64 to 1, one by one,
 *   and verifies that every bit reads back as set.
 * - "test_bit_relaxed_clear" clears each bit of val32/val64 to 0, one by
 *   one, and verifies that every bit reads back as cleared.
 * - "test_bit_relaxed_test_set_clear" checks that "test and set" and
 *   "test and clear" work correctly on each bit of val32/val64.
 */

static int
test_bit_relaxed_set(void)
{
	unsigned int i;

	for (i = 0; i < MAX_BITS_32; i++)
		rte_bit_relaxed_set32(i, &val32);

	for (i = 0; i < MAX_BITS_32; i++)
		if (!rte_bit_relaxed_get32(i, &val32)) {
			printf("Failed to set bit in relaxed version.\n");
			return TEST_FAILED;
		}

	for (i = 0; i < MAX_BITS_64; i++)
		rte_bit_relaxed_set64(i, &val64);

	for (i = 0; i < MAX_BITS_64; i++)
		if (!rte_bit_relaxed_get64(i, &val64)) {
			printf("Failed to set bit in relaxed version.\n");
			return TEST_FAILED;
		}

	return TEST_SUCCESS;
}

static int
test_bit_relaxed_clear(void)
{
	unsigned int i;

	for (i = 0; i < MAX_BITS_32; i++)
		rte_bit_relaxed_clear32(i, &val32);

	for (i = 0; i < MAX_BITS_32; i++)
		if (rte_bit_relaxed_get32(i, &val32)) {
			printf("Failed to clear bit in relaxed version.\n");
			return TEST_FAILED;
		}

	for (i = 0; i < MAX_BITS_64; i++)
		rte_bit_relaxed_clear64(i, &val64);

	for (i = 0; i < MAX_BITS_64; i++)
		if (rte_bit_relaxed_get64(i, &val64)) {
			printf("Failed to clear bit in relaxed version.\n");
			return TEST_FAILED;
		}

	return TEST_SUCCESS;
}

static int
test_bit_relaxed_test_set_clear(void)
{
	unsigned int i;

	for (i = 0; i < MAX_BITS_32; i++)
		rte_bit_relaxed_test_and_set32(i, &val32);

	for (i = 0; i < MAX_BITS_32; i++)
		if (!rte_bit_relaxed_test_and_clear32(i, &val32)) {
			printf("Failed to set and test bit in relaxed version.\n");
			return TEST_FAILED;
		}

	for (i = 0; i < MAX_BITS_32; i++)
		if (rte_bit_relaxed_get32(i, &val32)) {
			printf("Failed to test and clear bit in relaxed version.\n");
			return TEST_FAILED;
		}

	for (i = 0; i < MAX_BITS_64; i++)
		rte_bit_relaxed_test_and_set64(i, &val64);

	for (i = 0; i < MAX_BITS_64; i++)
		if (!rte_bit_relaxed_test_and_clear64(i, &val64)) {
			printf("Failed to set and test bit in relaxed version.\n");
			return TEST_FAILED;
		}

	for (i = 0; i < MAX_BITS_64; i++)
		if (rte_bit_relaxed_get64(i, &val64)) {
			printf("Failed to test and clear bit in relaxed version.\n");
			return TEST_FAILED;
		}

	return TEST_SUCCESS;
}

static struct unit_test_suite test_suite = {
	.suite_name = "Bitops test suite",
	.unit_test_cases = {
		TEST_CASE(test_bit_access32),
		TEST_CASE(test_bit_access64),
		TEST_CASE(test_bit_v_access32),
		TEST_CASE(test_bit_v_access64),
		TEST_CASE(test_bit_atomic_access32),
		TEST_CASE(test_bit_atomic_access64),
		TEST_CASE(test_bit_atomic_v_access32),
		TEST_CASE(test_bit_atomic_v_access64),
		TEST_CASE(test_bit_atomic_parallel_assign32),
		TEST_CASE(test_bit_atomic_parallel_assign64),
		TEST_CASE(test_bit_atomic_parallel_test_and_modify32),
		TEST_CASE(test_bit_atomic_parallel_test_and_modify64),
		TEST_CASE(test_bit_atomic_parallel_flip32),
		TEST_CASE(test_bit_atomic_parallel_flip64),
		TEST_CASE(test_bit_relaxed_set),
		TEST_CASE(test_bit_relaxed_clear),
		TEST_CASE(test_bit_relaxed_test_set_clear),
		TEST_CASES_END()
	}
};

static int
test_bitops(void)
{
	return unit_test_suite_runner(&test_suite);
}

REGISTER_FAST_TEST(bitops_autotest, true, true, test_bitops);