/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Arm Limited
 * Copyright(c) 2010-2019 Intel Corporation
 * Copyright(c) 2023 Microsoft Corporation
 * Copyright(c) 2024 Ericsson AB
 */

#ifndef _RTE_BITOPS_H_
#define _RTE_BITOPS_H_

/**
 * @file
 * Bit Operations
 *
 * This file provides functionality for low-level, single-word
 * arithmetic and bit-level operations, such as counting or
 * setting individual bits.
 */

#include <limits.h>
#include <stdint.h>

#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Get the uint64_t value for a specified bit set.
 *
 * @param nr
 *   The bit number in range of 0 to 63.
 */
#define RTE_BIT64(nr) (UINT64_C(1) << (nr))

/**
 * Get the uint32_t value for a specified bit set.
 *
 * @param nr
 *   The bit number in range of 0 to 31.
 */
#define RTE_BIT32(nr) (UINT32_C(1) << (nr))

/**
 * Get the uint32_t shifted value.
 *
 * @param val
 *   The value to be shifted.
 * @param nr
 *   The shift number in range of 0 to (32 - width of val).
 */
#define RTE_SHIFT_VAL32(val, nr) (UINT32_C(val) << (nr))

/**
 * Get the uint64_t shifted value.
 *
 * @param val
 *   The value to be shifted.
 * @param nr
 *   The shift number in range of 0 to (64 - width of val).
 */
#define RTE_SHIFT_VAL64(val, nr) (UINT64_C(val) << (nr))

/**
 * Generate a contiguous 32-bit mask
 * starting at bit position low and ending at position high.
 *
 * @param high
 *   High bit position.
 * @param low
 *   Low bit position.
 */
#define RTE_GENMASK32(high, low) \
	(((~UINT32_C(0)) << (low)) & (~UINT32_C(0) >> (31u - (high))))

/**
 * Generate a contiguous 64-bit mask
 * starting at bit position low and ending at position high.
 *
 * @param high
 *   High bit position.
 * @param low
 *   Low bit position.
 */
#define RTE_GENMASK64(high, low) \
	(((~UINT64_C(0)) << (low)) & (~UINT64_C(0) >> (63u - (high))))

/**
 * Extract a 32-bit field element.
 *
 * @param mask
 *   Shifted mask.
 * @param reg
 *   Value of entire bitfield.
 */
#define RTE_FIELD_GET32(mask, reg) \
	((typeof(mask))(((reg) & (mask)) >> rte_ctz32(mask)))

/**
 * Extract a 64-bit field element.
 *
 * @param mask
 *   Shifted mask.
 * @param reg
 *   Value of entire bitfield.
 */
#define RTE_FIELD_GET64(mask, reg) \
	((typeof(mask))(((reg) & (mask)) >> rte_ctz64(mask)))
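/*
 * Illustrative sketch (not part of the API): extracting a field from a
 * register value with the mask helpers above. The register layout and
 * names are hypothetical.
 *
 *	#define EX_REG_QUEUE_MASK RTE_GENMASK32(7, 4)
 *
 *	uint32_t reg = 0xa5;
 *	uint32_t queue = RTE_FIELD_GET32(EX_REG_QUEUE_MASK, reg);
 *
 * RTE_GENMASK32(7, 4) evaluates to 0xf0, so queue ends up as 0xa.
 */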
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Test bit in word.
 *
 * Generic selection macro to test the value of a bit in a 32-bit or
 * 64-bit word. The type of operation depends on the type of the @c
 * addr parameter.
 *
 * This macro does not give any guarantees with regard to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to query.
 * @param nr
 *   The index of the bit.
 */
#define rte_bit_test(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_test32, \
		const uint32_t *: __rte_bit_test32, \
		volatile uint32_t *: __rte_bit_v_test32, \
		const volatile uint32_t *: __rte_bit_v_test32, \
		uint64_t *: __rte_bit_test64, \
		const uint64_t *: __rte_bit_test64, \
		volatile uint64_t *: __rte_bit_v_test64, \
		const volatile uint64_t *: __rte_bit_v_test64) \
		(addr, nr)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Set bit in word.
 *
 * Generic selection macro to set a bit in a 32-bit or 64-bit
 * word. The type of operation depends on the type of the @c addr
 * parameter.
 *
 * This macro does not give any guarantees with regard to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 */
#define rte_bit_set(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_set32, \
		volatile uint32_t *: __rte_bit_v_set32, \
		uint64_t *: __rte_bit_set64, \
		volatile uint64_t *: __rte_bit_v_set64) \
		(addr, nr)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Clear bit in word.
 *
 * Generic selection macro to clear a bit in a 32-bit or 64-bit
 * word. The type of operation depends on the type of the @c addr
 * parameter.
 *
 * This macro does not give any guarantees with regard to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 */
#define rte_bit_clear(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_clear32, \
		volatile uint32_t *: __rte_bit_v_clear32, \
		uint64_t *: __rte_bit_clear64, \
		volatile uint64_t *: __rte_bit_v_clear64) \
		(addr, nr)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Assign a value to a bit in word.
 *
 * Generic selection macro to assign a value to a bit in a 32-bit or 64-bit
 * word. The type of operation depends on the type of the @c addr parameter.
 *
 * This macro does not give any guarantees with regard to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param value
 *   The new value of the bit - true for '1', or false for '0'.
 */
#define rte_bit_assign(addr, nr, value) \
	_Generic((addr), \
		uint32_t *: __rte_bit_assign32, \
		volatile uint32_t *: __rte_bit_v_assign32, \
		uint64_t *: __rte_bit_assign64, \
		volatile uint64_t *: __rte_bit_v_assign64) \
		(addr, nr, value)
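/*
 * Illustrative sketch (not part of the API): maintaining a feature flag
 * word with the generic bit macros above. The names are hypothetical.
 *
 *	uint64_t flags = 0;
 *	bool enable_bar = ...;
 *
 *	rte_bit_set(&flags, 5);
 *	rte_bit_assign(&flags, 7, enable_bar);
 *	if (rte_bit_test(&flags, 5))
 *		rte_bit_clear(&flags, 5);
 */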
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Flip a bit in word.
 *
 * Generic selection macro to change the value of a bit to '0' if '1'
 * or '1' if '0' in a 32-bit or 64-bit word. The type of operation
 * depends on the type of the @c addr parameter.
 *
 * This macro does not give any guarantees with regard to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 */
#define rte_bit_flip(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_flip32, \
		volatile uint32_t *: __rte_bit_v_flip32, \
		uint64_t *: __rte_bit_flip64, \
		volatile uint64_t *: __rte_bit_v_flip64) \
		(addr, nr)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Test if a particular bit in a word is set with a particular memory
 * order.
 *
 * Test a bit with the resulting memory load ordered as per the
 * specified memory order.
 *
 * @param addr
 *   A pointer to the word to query.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit is set, and false otherwise.
 */
#define rte_bit_atomic_test(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test32, \
		const uint32_t *: __rte_bit_atomic_test32, \
		volatile uint32_t *: __rte_bit_atomic_v_test32, \
		const volatile uint32_t *: __rte_bit_atomic_v_test32, \
		uint64_t *: __rte_bit_atomic_test64, \
		const uint64_t *: __rte_bit_atomic_test64, \
		volatile uint64_t *: __rte_bit_atomic_v_test64, \
		const volatile uint64_t *: __rte_bit_atomic_v_test64) \
		(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically set bit in word.
 *
 * Generic selection macro to atomically set bit specified by @c nr in
 * the word pointed to by @c addr to '1', with the memory ordering as
 * specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_set(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_set32, \
		volatile uint32_t *: __rte_bit_atomic_v_set32, \
		uint64_t *: __rte_bit_atomic_set64, \
		volatile uint64_t *: __rte_bit_atomic_v_set64) \
		(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically clear bit in word.
 *
 * Generic selection macro to atomically set bit specified by @c nr in
 * the word pointed to by @c addr to '0', with the memory ordering as
 * specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_clear(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_clear32, \
		volatile uint32_t *: __rte_bit_atomic_v_clear32, \
		uint64_t *: __rte_bit_atomic_clear64, \
		volatile uint64_t *: __rte_bit_atomic_v_clear64) \
		(addr, nr, memory_order)
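/*
 * Illustrative sketch (not part of the API): one thread publishing a
 * "ready" flag that another thread polls, using release/acquire
 * ordering. status_word is a hypothetical uint32_t shared between them.
 *
 *	// producer
 *	rte_bit_atomic_set(&status_word, 0, rte_memory_order_release);
 *
 *	// consumer
 *	while (!rte_bit_atomic_test(&status_word, 0, rte_memory_order_acquire))
 *		;
 */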
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically assign a value to bit in word.
 *
 * Generic selection macro to atomically set bit specified by @c nr in the
 * word pointed to by @c addr to the value indicated by @c value, with
 * the memory ordering as specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param value
 *   The new value of the bit - true for '1', or false for '0'.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_assign(addr, nr, value, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_assign32, \
		volatile uint32_t *: __rte_bit_atomic_v_assign32, \
		uint64_t *: __rte_bit_atomic_assign64, \
		volatile uint64_t *: __rte_bit_atomic_v_assign64) \
		(addr, nr, value, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically flip bit in word.
 *
 * Generic selection macro to atomically negate the value of the bit
 * specified by @c nr in the word pointed to by @c addr, with the
 * memory ordering as specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_flip(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_flip32, \
		volatile uint32_t *: __rte_bit_atomic_v_flip32, \
		uint64_t *: __rte_bit_atomic_flip64, \
		volatile uint64_t *: __rte_bit_atomic_v_flip64) \
		(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically test and set a bit in word.
 *
 * Generic selection macro to atomically test and set bit specified by
 * @c nr in the word pointed to by @c addr to '1', with the memory
 * ordering as specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit was set, and false otherwise.
 */
#define rte_bit_atomic_test_and_set(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_set32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_set32, \
		uint64_t *: __rte_bit_atomic_test_and_set64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_set64) \
		(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically test and clear a bit in word.
 *
 * Generic selection macro to atomically test and clear bit specified
 * by @c nr in the word pointed to by @c addr to '0', with the memory
 * ordering as specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit was set, and false otherwise.
 */
#define rte_bit_atomic_test_and_clear(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_clear32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_clear32, \
		uint64_t *: __rte_bit_atomic_test_and_clear64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_clear64) \
		(addr, nr, memory_order)
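/*
 * Illustrative sketch (not part of the API): a simple try-lock built on
 * test-and-set. lock_word is a hypothetical uint32_t; the lock is taken
 * when rte_bit_atomic_test_and_set() reports the bit was previously clear.
 *
 *	if (!rte_bit_atomic_test_and_set(&lock_word, 0, rte_memory_order_acquire)) {
 *		// critical section
 *		rte_bit_atomic_clear(&lock_word, 0, rte_memory_order_release);
 *	}
 */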
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically test and assign a bit in word.
 *
 * Generic selection macro to atomically test and assign bit specified
 * by @c nr in the word pointed to by @c addr to the value specified by
 * @c value, with the memory ordering as specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param value
 *   The new value of the bit - true for '1', or false for '0'.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit was set, and false otherwise.
 */
#define rte_bit_atomic_test_and_assign(addr, nr, value, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_assign32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_assign32, \
		uint64_t *: __rte_bit_atomic_test_and_assign64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_assign64) \
		(addr, nr, value, memory_order)

#define __RTE_GEN_BIT_TEST(variant, qualifier, size) \
	__rte_experimental \
	static inline bool \
	__rte_bit_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, unsigned int nr) \
	{ \
		RTE_ASSERT(nr < size); \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		return *addr & mask; \
	}

#define __RTE_GEN_BIT_SET(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
	{ \
		RTE_ASSERT(nr < size); \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		*addr |= mask; \
	}

#define __RTE_GEN_BIT_CLEAR(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
	{ \
		RTE_ASSERT(nr < size); \
		uint ## size ## _t mask = ~((uint ## size ## _t)1 << nr); \
		(*addr) &= mask; \
	}

#define __RTE_GEN_BIT_ASSIGN(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, unsigned int nr, \
			bool value) \
	{ \
		if (value) \
			__rte_bit_ ## variant ## set ## size(addr, nr); \
		else \
			__rte_bit_ ## variant ## clear ## size(addr, nr); \
	}

#define __RTE_GEN_BIT_FLIP(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
	{ \
		bool value; \
		value = __rte_bit_ ## variant ## test ## size(addr, nr); \
		__rte_bit_ ## variant ## assign ## size(addr, nr, !value); \
	}

#define __RTE_GEN_BIT_OPS(v, qualifier, size) \
	__RTE_GEN_BIT_TEST(v, qualifier, size) \
	__RTE_GEN_BIT_SET(v, qualifier, size) \
	__RTE_GEN_BIT_CLEAR(v, qualifier, size) \
	__RTE_GEN_BIT_ASSIGN(v, qualifier, size) \
	__RTE_GEN_BIT_FLIP(v, qualifier, size)

#define __RTE_GEN_BIT_OPS_SIZE(size) \
	__RTE_GEN_BIT_OPS(,, size) \
	__RTE_GEN_BIT_OPS(v_, volatile, size)

#ifdef ALLOW_EXPERIMENTAL_API
__RTE_GEN_BIT_OPS_SIZE(32)
__RTE_GEN_BIT_OPS_SIZE(64)
#endif
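/*
 * For reference, a rough sketch of what the generators above produce:
 * __RTE_GEN_BIT_OPS_SIZE(32) expands to approximately
 *
 *	static inline bool
 *	__rte_bit_test32(const uint32_t *addr, unsigned int nr)
 *	{
 *		RTE_ASSERT(nr < 32);
 *		uint32_t mask = (uint32_t)1 << nr;
 *		return *addr & mask;
 *	}
 *
 * plus the set/clear/assign/flip siblings, and the same family again as
 * "v_"-prefixed variants taking volatile-qualified pointers.
 */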
#define __RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
	__rte_experimental \
	static inline bool \
	__rte_bit_atomic_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, \
			unsigned int nr, int memory_order) \
	{ \
		RTE_ASSERT(nr < size); \
		const qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
			(const qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		return rte_atomic_load_explicit(a_addr, memory_order) & mask; \
	}

#define __RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_atomic_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, \
			unsigned int nr, int memory_order) \
	{ \
		RTE_ASSERT(nr < size); \
		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
	}

#define __RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_atomic_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, \
			unsigned int nr, int memory_order) \
	{ \
		RTE_ASSERT(nr < size); \
		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
	}

#define __RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_atomic_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, \
			unsigned int nr, int memory_order) \
	{ \
		RTE_ASSERT(nr < size); \
		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		rte_atomic_fetch_xor_explicit(a_addr, mask, memory_order); \
	}

#define __RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
	__rte_experimental \
	static inline void \
	__rte_bit_atomic_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, \
			unsigned int nr, bool value, int memory_order) \
	{ \
		if (value) \
			__rte_bit_atomic_ ## variant ## set ## size(addr, nr, memory_order); \
		else \
			__rte_bit_atomic_ ## variant ## clear ## size(addr, nr, memory_order); \
	}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
	__rte_experimental \
	static inline bool \
	__rte_bit_atomic_ ## variant ## test_and_set ## size(qualifier uint ## size ## _t *addr, \
			unsigned int nr, int memory_order) \
	{ \
		RTE_ASSERT(nr < size); \
		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		uint ## size ## _t prev; \
		prev = rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
		return prev & mask; \
	}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
	__rte_experimental \
	static inline bool \
	__rte_bit_atomic_ ## variant ## test_and_clear ## size(qualifier uint ## size ## _t *addr, \
			unsigned int nr, int memory_order) \
	{ \
		RTE_ASSERT(nr < size); \
		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		uint ## size ## _t prev; \
		prev = rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
		return prev & mask; \
	}
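/*
 * Worked example for the fetch-and based test-and-clear above: with
 * *addr == 0x0a (binary 1010) and nr == 1, mask is 0x02, so the
 * fetch-and with ~mask stores 0x08 and returns the previous value 0x0a;
 * "prev & mask" is then 0x02, i.e. the function reports the bit was set.
 */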
#define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
	__rte_experimental \
	static inline bool \
	__rte_bit_atomic_ ## variant ## test_and_assign ## size( \
			qualifier uint ## size ## _t *addr, unsigned int nr, bool value, \
			int memory_order) \
	{ \
		if (value) \
			return __rte_bit_atomic_ ## variant ## test_and_set ## size(addr, nr, \
				memory_order); \
		else \
			return __rte_bit_atomic_ ## variant ## test_and_clear ## size(addr, nr, \
				memory_order); \
	}

#define __RTE_GEN_BIT_ATOMIC_OPS(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size)

#define __RTE_GEN_BIT_ATOMIC_OPS_SIZE(size) \
	__RTE_GEN_BIT_ATOMIC_OPS(,, size) \
	__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)

#ifdef ALLOW_EXPERIMENTAL_API
__RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
__RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)
#endif

/*------------------------ 32-bit relaxed operations ------------------------*/

/**
 * Get the target bit from a 32-bit value without memory ordering.
 *
 * @param nr
 *   The target bit to get.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The target bit.
 */
static inline uint32_t
rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = UINT32_C(1) << nr;
	return (*addr) & mask;
}

/**
 * Set the target bit in a 32-bit value to 1 without memory ordering.
 *
 * @param nr
 *   The target bit to set.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_set32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	*addr = (*addr) | mask;
}

/**
 * Clear the target bit in a 32-bit value to 0 without memory ordering.
 *
 * @param nr
 *   The target bit to clear.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_clear32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	*addr = (*addr) & (~mask);
}

/**
 * Return the original bit from a 32-bit value, then set it to 1 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and set.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint32_t
rte_bit_relaxed_test_and_set32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	uint32_t val = *addr;
	*addr = val | mask;
	return val & mask;
}

/**
 * Return the original bit from a 32-bit value, then clear it to 0 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and clear.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint32_t
rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	uint32_t val = *addr;
	*addr = val & (~mask);
	return val & mask;
}
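/*
 * Illustrative sketch (not part of the API): claiming slots in a bitmap
 * that only a single thread updates, so the non-atomic read-modify-write
 * of the relaxed operations is sufficient. "claimed" is hypothetical.
 *
 *	static volatile uint32_t claimed;
 *
 *	if (!rte_bit_relaxed_test_and_set32(idx, &claimed))
 *		; // slot idx was free and has now been claimed
 */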
/*------------------------ 64-bit relaxed operations ------------------------*/

/**
 * Get the target bit from a 64-bit value without memory ordering.
 *
 * @param nr
 *   The target bit to get.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The target bit.
 */
static inline uint64_t
rte_bit_relaxed_get64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	return (*addr) & mask;
}

/**
 * Set the target bit in a 64-bit value to 1 without memory ordering.
 *
 * @param nr
 *   The target bit to set.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_set64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	(*addr) = (*addr) | mask;
}

/**
 * Clear the target bit in a 64-bit value to 0 without memory ordering.
 *
 * @param nr
 *   The target bit to clear.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_clear64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	*addr = (*addr) & (~mask);
}

/**
 * Return the original bit from a 64-bit value, then set it to 1 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and set.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint64_t
rte_bit_relaxed_test_and_set64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	uint64_t val = *addr;
	*addr = val | mask;
	return val & mask;
}

/**
 * Return the original bit from a 64-bit value, then clear it to 0 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and clear.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint64_t
rte_bit_relaxed_test_and_clear64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	uint64_t val = *addr;
	*addr = val & (~mask);
	return val & mask;
}
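/*
 * Illustrative values for the count operations defined below (the MSVC
 * and GCC/Clang implementations agree on all of them):
 *
 *	rte_clz32(UINT32_C(0x00010000))      -> 15
 *	rte_ctz32(UINT32_C(0x00010000))      -> 16
 *	rte_popcount32(UINT32_C(0x0000f0f0)) -> 8
 *	rte_ffs32(0) -> 0, rte_ffs32(8) -> 4
 */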
#ifdef RTE_TOOLCHAIN_MSVC

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz32(uint32_t v)
{
	unsigned long rv;

	(void)_BitScanReverse(&rv, v);

	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
}

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz64(uint64_t v)
{
	unsigned long rv;

	(void)_BitScanReverse64(&rv, v);

	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz32(uint32_t v)
{
	unsigned long rv;

	(void)_BitScanForward(&rv, v);

	return (unsigned int)rv;
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz64(uint64_t v)
{
	unsigned long rv;

	(void)_BitScanForward64(&rv, v);

	return (unsigned int)rv;
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount32(uint32_t v)
{
	return (unsigned int)__popcnt(v);
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount64(uint64_t v)
{
	return (unsigned int)__popcnt64(v);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Search v from least significant bit (LSB) to the most
 * significant bit (MSB) for a set bit (1).
 *
 * @param v
 *   The value.
 * @return
 *   Bit index + 1 if a set bit is found, zero otherwise.
 */
__rte_experimental
static inline unsigned int
rte_ffs32(uint32_t v)
{
	unsigned long rv;

	if (_BitScanForward(&rv, v) == 0)
		return 0;

	return (unsigned int)rv + 1;
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Search v from least significant bit (LSB) to the most
 * significant bit (MSB) for a set bit (1).
 *
 * @param v
 *   The value.
 * @return
 *   Bit index + 1 if a set bit is found, zero otherwise.
 */
__rte_experimental
static inline unsigned int
rte_ffs64(uint64_t v)
{
	unsigned long rv;

	if (_BitScanForward64(&rv, v) == 0)
		return 0;

	return (unsigned int)rv + 1;
}

#else

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz32(uint32_t v)
{
	return (unsigned int)__builtin_clz(v);
}

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz64(uint64_t v)
{
	return (unsigned int)__builtin_clzll(v);
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz32(uint32_t v)
{
	return (unsigned int)__builtin_ctz(v);
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz64(uint64_t v)
{
	return (unsigned int)__builtin_ctzll(v);
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount32(uint32_t v)
{
	return (unsigned int)__builtin_popcount(v);
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount64(uint64_t v)
{
	return (unsigned int)__builtin_popcountll(v);
}
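/*
 * Illustrative sketch (not part of the API): visiting every set bit in a
 * word with rte_ctz32(), clearing the lowest set bit on each iteration.
 * handle_bit() is a hypothetical callback.
 *
 *	uint32_t w = bitmap;
 *
 *	while (w != 0) {
 *		handle_bit(rte_ctz32(w));
 *		w &= w - 1;	// clear the lowest set bit
 *	}
 */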
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Search v from least significant bit (LSB) to the most
 * significant bit (MSB) for a set bit (1).
 *
 * @param v
 *   The value.
 * @return
 *   Bit index + 1 if a set bit is found, zero otherwise.
 */
__rte_experimental
static inline unsigned int
rte_ffs32(uint32_t v)
{
	return (unsigned int)__builtin_ffs(v);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Search v from least significant bit (LSB) to the most
 * significant bit (MSB) for a set bit (1).
 *
 * @param v
 *   The value.
 * @return
 *   Bit index + 1 if a set bit is found, zero otherwise.
 */
__rte_experimental
static inline unsigned int
rte_ffs64(uint64_t v)
{
	return (unsigned int)__builtin_ffsll(v);
}

#endif

/**
 * Combines the most significant set bit of a 32-bit input into the less
 * significant bits, constructing a value with the same MSB as x
 * but all 1's under it.
 *
 * @param x
 *   The integer whose MSB needs to be combined with its LSBs.
 * @return
 *   The combined value.
 */
static inline uint32_t
rte_combine32ms1b(uint32_t x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;

	return x;
}

/**
 * Combines the most significant set bit of a 64-bit input into the less
 * significant bits, constructing a value with the same MSB as v
 * but all 1's under it.
 *
 * @param v
 *   The integer whose MSB needs to be combined with its LSBs.
 * @return
 *   The combined value.
 */
static inline uint64_t
rte_combine64ms1b(uint64_t v)
{
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v |= v >> 32;

	return v;
}

/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero).
 * If a least significant 1 bit is found, its bit index is returned.
 * If the input parameter is zero, the return value is undefined.
 *
 * @param v
 *   Input parameter, should not be zero.
 * @return
 *   Least significant set bit in the input parameter.
 */
static inline uint32_t
rte_bsf32(uint32_t v)
{
	return (uint32_t)rte_ctz32(v);
}

/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero). Safe version (checks for input parameter being zero).
 *
 * @warning ``pos`` must be a valid pointer. It is not checked!
 *
 * @param v
 *   The input parameter.
 * @param pos
 *   If ``v`` was not 0, this value will contain position of least significant
 *   bit within the input parameter.
 * @return
 *   Returns 0 if ``v`` was 0, otherwise returns 1.
 */
static inline int
rte_bsf32_safe(uint32_t v, uint32_t *pos)
{
	if (v == 0)
		return 0;

	*pos = rte_bsf32(v);
	return 1;
}

/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero).
 * If a least significant 1 bit is found, its bit index is returned.
 * If the input parameter is zero, the return value is undefined.
 *
 * @param v
 *   Input parameter, should not be zero.
 * @return
 *   Least significant set bit in the input parameter.
 */
static inline uint32_t
rte_bsf64(uint64_t v)
{
	return (uint32_t)rte_ctz64(v);
}
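/*
 * Illustrative sketch (not part of the API): the safe variants avoid the
 * undefined result on zero input.
 *
 *	uint32_t pos;
 *
 *	if (rte_bsf32_safe(v, &pos))
 *		; // v != 0; pos holds the index of its lowest set bit
 *	else
 *		; // v == 0; pos is left untouched
 */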
/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero). Safe version (checks for input parameter being zero).
 *
 * @warning ``pos`` must be a valid pointer. It is not checked!
 *
 * @param v
 *   The input parameter.
 * @param pos
 *   If ``v`` was not 0, this value will contain position of least significant
 *   bit within the input parameter.
 * @return
 *   Returns 0 if ``v`` was 0, otherwise returns 1.
 */
static inline int
rte_bsf64_safe(uint64_t v, uint32_t *pos)
{
	if (v == 0)
		return 0;

	*pos = rte_bsf64(v);
	return 1;
}

/**
 * Return the last (most-significant) bit set.
 *
 * @note The last (most significant) bit is at position 32.
 * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32
 *
 * @param x
 *   The input parameter.
 * @return
 *   The last (most-significant) bit set, or 0 if the input is 0.
 */
static inline uint32_t
rte_fls_u32(uint32_t x)
{
	return (x == 0) ? 0 : 32 - rte_clz32(x);
}

/**
 * Return the last (most-significant) bit set.
 *
 * @note The last (most significant) bit is at position 64.
 * @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1,
 *   rte_fls_u64(0x8000000000000000) = 64
 *
 * @param x
 *   The input parameter.
 * @return
 *   The last (most-significant) bit set, or 0 if the input is 0.
 */
static inline uint32_t
rte_fls_u64(uint64_t x)
{
	return (x == 0) ? 0 : 64 - rte_clz64(x);
}

/*********** Macros to work with powers of 2 ********/

/**
 * Macro to return 1 if n is a power of 2, 0 otherwise
 */
#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n)))

/**
 * Returns true if n is a power of 2
 * @param n
 *   Number to check
 * @return 1 if true, 0 otherwise
 */
static inline int
rte_is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

/**
 * Aligns input parameter to the next power of 2
 *
 * @param x
 *   The integer value to align
 *
 * @return
 *   Input parameter aligned to the next power of 2
 */
static inline uint32_t
rte_align32pow2(uint32_t x)
{
	x--;
	x = rte_combine32ms1b(x);

	return x + 1;
}

/**
 * Aligns input parameter to the previous power of 2
 *
 * @param x
 *   The integer value to align
 *
 * @return
 *   Input parameter aligned to the previous power of 2
 */
static inline uint32_t
rte_align32prevpow2(uint32_t x)
{
	x = rte_combine32ms1b(x);

	return x - (x >> 1);
}

/**
 * Aligns 64b input parameter to the next power of 2
 *
 * @param v
 *   The 64b value to align
 *
 * @return
 *   Input parameter aligned to the next power of 2
 */
static inline uint64_t
rte_align64pow2(uint64_t v)
{
	v--;
	v = rte_combine64ms1b(v);

	return v + 1;
}

/**
 * Aligns 64b input parameter to the previous power of 2
 *
 * @param v
 *   The 64b value to align
 *
 * @return
 *   Input parameter aligned to the previous power of 2
 */
static inline uint64_t
rte_align64prevpow2(uint64_t v)
{
	v = rte_combine64ms1b(v);

	return v - (v >> 1);
}
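/*
 * Illustrative values for the power-of-2 helpers above:
 *
 *	rte_combine32ms1b(0x50) -> 0x7f
 *	rte_align32pow2(5)      -> 8
 *	rte_align32pow2(8)      -> 8
 *	rte_align32prevpow2(5)  -> 4
 *	rte_align32pow2(0)      -> 0	(0 - 1 combines to ~0, and +1 wraps)
 */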
/**
 * Return the rounded-up log2 of a 32-bit integer.
 *
 * @note Contrary to the logarithm mathematical operation,
 * rte_log2_u32(0) == 0 and not -inf.
 *
 * @param v
 *   The input parameter.
 * @return
 *   The rounded-up log2 of the input, or 0 if the input is 0.
 */
static inline uint32_t
rte_log2_u32(uint32_t v)
{
	if (v == 0)
		return 0;
	v = rte_align32pow2(v);
	return rte_bsf32(v);
}

/**
 * Return the rounded-up log2 of a 64-bit integer.
 *
 * @note Contrary to the logarithm mathematical operation,
 * rte_log2_u64(0) == 0 and not -inf.
 *
 * @param v
 *   The input parameter.
 * @return
 *   The rounded-up log2 of the input, or 0 if the input is 0.
 */
static inline uint32_t
rte_log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;
	v = rte_align64pow2(v);
	/* we checked for v being 0 already, so no undefined behavior */
	return rte_bsf64(v);
}

#ifdef __cplusplus
}

/*
 * Since C++ doesn't support generic selection (i.e., _Generic),
 * function overloading is used instead. Such functions must be
 * defined outside 'extern "C"' to be accepted by the compiler.
 */

#undef rte_bit_test
#undef rte_bit_set
#undef rte_bit_clear
#undef rte_bit_assign
#undef rte_bit_flip

#undef rte_bit_atomic_test
#undef rte_bit_atomic_set
#undef rte_bit_atomic_clear
#undef rte_bit_atomic_assign
#undef rte_bit_atomic_flip
#undef rte_bit_atomic_test_and_set
#undef rte_bit_atomic_test_and_clear
#undef rte_bit_atomic_test_and_assign

#define __RTE_BIT_OVERLOAD_V_2(family, v, fun, qualifier, size, arg1_type, arg1_name) \
	static inline void \
	rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
	{ \
		__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
	}

#define __RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, size, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2(family,, fun, qualifier, size, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_2(family, fun, qualifier, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 32, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 64, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_V_2R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
	static inline ret_type \
	rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
	{ \
		return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
	}

#define __RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2R(family, v_, fun, qualifier volatile, size, ret_type, arg1_type, \
		arg1_name)

#define __RTE_BIT_OVERLOAD_2R(family, fun, qualifier, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name)
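/*
 * For reference, a rough sketch of what the overload generators produce:
 * __RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr) expands
 * to approximately
 *
 *	static inline bool
 *	rte_bit_test(const uint32_t *addr, unsigned int nr)
 *	{
 *		return __rte_bit_test32(addr, nr);
 *	}
 *
 * repeated for the volatile-qualified and 64-bit pointer types, giving
 * C++ callers the same rte_bit_test() name the C _Generic macro offers.
 */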
#define __RTE_BIT_OVERLOAD_V_3(family, v, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	static inline void \
	rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
			arg2_type arg2_name) \
	{ \
		__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
	}

#define __RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3(family,, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_3(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 32, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 64, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_V_3R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	static inline ret_type \
	rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
			arg2_type arg2_name) \
	{ \
		return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
	}

#define __RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3R(family, v_, fun, qualifier volatile, size, ret_type, \
		arg1_type, arg1_name, arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_3R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_V_4(family, v, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	static inline void \
	rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
			arg2_type arg2_name, arg3_type arg3_name) \
	{ \
		__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, arg3_name); \
	}

#define __RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4(family,, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_4(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name, \
		arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 32, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 64, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)
#define __RTE_BIT_OVERLOAD_V_4R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	static inline ret_type \
	rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
			arg2_type arg2_name, arg3_type arg3_name) \
	{ \
		return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, \
			arg3_name); \
	}

#define __RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4R(family, v_, fun, qualifier volatile, size, ret_type, \
		arg1_type, arg1_name, arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_4R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

#ifdef ALLOW_EXPERIMENTAL_API
__RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr)
__RTE_BIT_OVERLOAD_2(, set,, unsigned int, nr)
__RTE_BIT_OVERLOAD_2(, clear,, unsigned int, nr)
__RTE_BIT_OVERLOAD_3(, assign,, unsigned int, nr, bool, value)
__RTE_BIT_OVERLOAD_2(, flip,, unsigned int, nr)

__RTE_BIT_OVERLOAD_3R(atomic_, test, const, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, set,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, clear,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_4(atomic_, assign,, unsigned int, nr, bool, value, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, flip,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3R(atomic_, test_and_set,, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3R(atomic_, test_and_clear,, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_4R(atomic_, test_and_assign,, bool, unsigned int, nr, bool, value,
	int, memory_order)
#endif

#endif

#endif /* _RTE_BITOPS_H_ */