/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Arm Limited
 * Copyright(c) 2010-2019 Intel Corporation
 * Copyright(c) 2023 Microsoft Corporation
 * Copyright(c) 2024 Ericsson AB
 */

#ifndef _RTE_BITOPS_H_
#define _RTE_BITOPS_H_

/**
 * @file
 * Bit Operations
 *
 * This file provides functionality for low-level, single-word
 * arithmetic and bit-level operations, such as counting or
 * setting individual bits.
 */

#include <limits.h>
#include <stdint.h>

#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif
/**
 * Get the uint64_t value for a specified bit set.
 *
 * @param nr
 *   The bit number in range of 0 to 63.
 */
#define RTE_BIT64(nr) (UINT64_C(1) << (nr))

/**
 * Get the uint32_t value for a specified bit set.
 *
 * @param nr
 *   The bit number in range of 0 to 31.
 */
#define RTE_BIT32(nr) (UINT32_C(1) << (nr))

/**
 * Get the uint32_t shifted value.
 *
 * @param val
 *   The value to be shifted.
 * @param nr
 *   The shift number in range of 0 to (32 - width of val).
 */
#define RTE_SHIFT_VAL32(val, nr) (UINT32_C(val) << (nr))

/**
 * Get the uint64_t shifted value.
 *
 * @param val
 *   The value to be shifted.
 * @param nr
 *   The shift number in range of 0 to (64 - width of val).
 */
#define RTE_SHIFT_VAL64(val, nr) (UINT64_C(val) << (nr))
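
/*
 * Usage sketch (illustrative only; the flag layout below is
 * hypothetical and not part of any DPDK API):
 *
 *	#define MY_FLAG_ENABLED RTE_BIT32(0)            // 0x00000001
 *	#define MY_FLAG_PROMISC RTE_BIT32(3)            // 0x00000008
 *	#define MY_QUEUE_FIELD  RTE_SHIFT_VAL32(0x3, 4) // 0x00000030
 *
 *	uint32_t flags = MY_FLAG_ENABLED | MY_QUEUE_FIELD;
 */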

/**
 * Generate a contiguous 32-bit mask
 * starting at bit position low and ending at position high.
 *
 * @param high
 *   High bit position.
 * @param low
 *   Low bit position.
 */
#define RTE_GENMASK32(high, low) \
		(((~UINT32_C(0)) << (low)) & (~UINT32_C(0) >> (31u - (high))))

/**
 * Generate a contiguous 64-bit mask
 * starting at bit position low and ending at position high.
 *
 * @param high
 *   High bit position.
 * @param low
 *   Low bit position.
 */
#define RTE_GENMASK64(high, low) \
		(((~UINT64_C(0)) << (low)) & (~UINT64_C(0) >> (63u - (high))))
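
/*
 * For example, RTE_GENMASK32(7, 4) evaluates to 0x000000f0 and
 * RTE_GENMASK64(39, 8) to 0x000000ffffffff00: the left shift clears
 * the bits below 'low', while the right-shifted all-ones word clears
 * the bits above 'high'.
 */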

/**
 * Extract a 32-bit field element.
 *
 * @param mask
 *   Shifted mask.
 * @param reg
 *   Value of entire bitfield.
 */
#define RTE_FIELD_GET32(mask, reg) \
		((typeof(mask))(((reg) & (mask)) >> rte_ctz32(mask)))

/**
 * Extract a 64-bit field element.
 *
 * @param mask
 *   Shifted mask.
 * @param reg
 *   Value of entire bitfield.
 */
#define RTE_FIELD_GET64(mask, reg) \
		((typeof(mask))(((reg) & (mask)) >> rte_ctz64(mask)))
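
/*
 * For example, with a (hypothetical) register layout keeping a queue
 * index in bits 4-7:
 *
 *	#define QUEUE_MASK RTE_GENMASK32(7, 4)
 *
 *	uint32_t reg = 0x0000005a;
 *	uint32_t queue = RTE_FIELD_GET32(QUEUE_MASK, reg); // (0x50 >> 4) == 0x5
 */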

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Test bit in word.
 *
 * Generic selection macro to test the value of a bit in a 32-bit or
 * 64-bit word. The type of operation depends on the type of the @c
 * addr parameter.
 *
 * This macro does not give any guarantees in regards to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to query.
 * @param nr
 *   The index of the bit.
 * @return
 *   Returns true if the bit is set, and false otherwise.
 */
#define rte_bit_test(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_test32, \
		const uint32_t *: __rte_bit_test32, \
		volatile uint32_t *: __rte_bit_v_test32, \
		const volatile uint32_t *: __rte_bit_v_test32, \
		uint64_t *: __rte_bit_test64, \
		const uint64_t *: __rte_bit_test64, \
		volatile uint64_t *: __rte_bit_v_test64, \
		const volatile uint64_t *: __rte_bit_v_test64) \
			(addr, nr)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Set bit in word.
 *
 * Generic selection macro to set a bit in a 32-bit or 64-bit
 * word. The type of operation depends on the type of the @c addr
 * parameter.
 *
 * This macro does not give any guarantees in regards to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 */
#define rte_bit_set(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_set32, \
		volatile uint32_t *: __rte_bit_v_set32, \
		uint64_t *: __rte_bit_set64, \
		volatile uint64_t *: __rte_bit_v_set64) \
			(addr, nr)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Clear bit in word.
 *
 * Generic selection macro to clear a bit in a 32-bit or 64-bit
 * word. The type of operation depends on the type of the @c addr
 * parameter.
 *
 * This macro does not give any guarantees in regards to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 */
#define rte_bit_clear(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_clear32, \
		volatile uint32_t *: __rte_bit_v_clear32, \
		uint64_t *: __rte_bit_clear64, \
		volatile uint64_t *: __rte_bit_v_clear64) \
			(addr, nr)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Assign a value to a bit in word.
 *
 * Generic selection macro to assign a value to a bit in a 32-bit or 64-bit
 * word. The type of operation depends on the type of the @c addr parameter.
 *
 * This macro does not give any guarantees in regards to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param value
 *   The new value of the bit - true for '1', or false for '0'.
 */
#define rte_bit_assign(addr, nr, value) \
	_Generic((addr), \
		uint32_t *: __rte_bit_assign32, \
		volatile uint32_t *: __rte_bit_v_assign32, \
		uint64_t *: __rte_bit_assign64, \
		volatile uint64_t *: __rte_bit_v_assign64) \
			(addr, nr, value)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Flip a bit in word.
 *
 * Generic selection macro to change the value of a bit to '0' if '1'
 * or '1' if '0' in a 32-bit or 64-bit word. The type of operation
 * depends on the type of the @c addr parameter.
 *
 * This macro does not give any guarantees in regards to memory
 * ordering or atomicity.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 */
#define rte_bit_flip(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_flip32, \
		volatile uint32_t *: __rte_bit_v_flip32, \
		uint64_t *: __rte_bit_flip64, \
		volatile uint64_t *: __rte_bit_v_flip64) \
			(addr, nr)
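
/*
 * Usage sketch for the generic, non-atomic bit operations above
 * (illustrative only):
 *
 *	uint64_t word = 0;
 *
 *	rte_bit_set(&word, 2);          // word == 0x4
 *	rte_bit_assign(&word, 0, true); // word == 0x5
 *	rte_bit_flip(&word, 2);         // word == 0x1
 *	if (rte_bit_test(&word, 0))
 *		rte_bit_clear(&word, 0); // word == 0x0
 */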

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Test if a particular bit in a word is set with a particular memory
 * order.
 *
 * Test a bit with the resulting memory load ordered as per the
 * specified memory order.
 *
 * @param addr
 *   A pointer to the word to query.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit is set, and false otherwise.
 */
#define rte_bit_atomic_test(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test32, \
		const uint32_t *: __rte_bit_atomic_test32, \
		volatile uint32_t *: __rte_bit_atomic_v_test32, \
		const volatile uint32_t *: __rte_bit_atomic_v_test32, \
		uint64_t *: __rte_bit_atomic_test64, \
		const uint64_t *: __rte_bit_atomic_test64, \
		volatile uint64_t *: __rte_bit_atomic_v_test64, \
		const volatile uint64_t *: __rte_bit_atomic_v_test64) \
			(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically set bit in word.
 *
 * Generic selection macro to atomically set bit specified by @c nr in
 * the word pointed to by @c addr to '1', with the memory ordering as
 * specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_set(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_set32, \
		volatile uint32_t *: __rte_bit_atomic_v_set32, \
		uint64_t *: __rte_bit_atomic_set64, \
		volatile uint64_t *: __rte_bit_atomic_v_set64) \
			(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically clear bit in word.
 *
 * Generic selection macro to atomically set bit specified by @c nr in
 * the word pointed to by @c addr to '0', with the memory ordering as
 * specified by @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_clear(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_clear32, \
		volatile uint32_t *: __rte_bit_atomic_v_clear32, \
		uint64_t *: __rte_bit_atomic_clear64, \
		volatile uint64_t *: __rte_bit_atomic_v_clear64) \
			(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically assign a value to bit in word.
 *
 * Generic selection macro to atomically set bit specified by @c nr in the
 * word pointed to by @c addr to the value indicated by @c value, with
 * the memory ordering as specified with @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param value
 *   The new value of the bit - true for '1', or false for '0'.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_assign(addr, nr, value, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_assign32, \
		volatile uint32_t *: __rte_bit_atomic_v_assign32, \
		uint64_t *: __rte_bit_atomic_assign64, \
		volatile uint64_t *: __rte_bit_atomic_v_assign64) \
			(addr, nr, value, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically flip bit in word.
 *
 * Generic selection macro to atomically negate the value of the bit
 * specified by @c nr in the word pointed to by @c addr, with the
 * memory ordering as specified with @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 */
#define rte_bit_atomic_flip(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_flip32, \
		volatile uint32_t *: __rte_bit_atomic_v_flip32, \
		uint64_t *: __rte_bit_atomic_flip64, \
		volatile uint64_t *: __rte_bit_atomic_v_flip64) \
			(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically test and set a bit in word.
 *
 * Generic selection macro to atomically test and set bit specified by
 * @c nr in the word pointed to by @c addr to '1', with the memory
 * ordering as specified with @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit was set, and false otherwise.
 */
#define rte_bit_atomic_test_and_set(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_set32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_set32, \
		uint64_t *: __rte_bit_atomic_test_and_set64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_set64) \
			(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically test and clear a bit in word.
 *
 * Generic selection macro to atomically test and clear bit specified
 * by @c nr in the word pointed to by @c addr to '0', with the memory
 * ordering as specified with @c memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit was set, and false otherwise.
 */
#define rte_bit_atomic_test_and_clear(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_clear32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_clear32, \
		uint64_t *: __rte_bit_atomic_test_and_clear64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_clear64) \
			(addr, nr, memory_order)

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Atomically test and assign a bit in word.
 *
 * Generic selection macro to atomically test and assign bit specified
 * by @c nr in the word pointed to by @c addr the value specified by
 * @c value, with the memory ordering as specified with @c
 * memory_order.
 *
 * @param addr
 *   A pointer to the word to modify.
 * @param nr
 *   The index of the bit.
 * @param value
 *   The new value of the bit - true for '1', or false for '0'.
 * @param memory_order
 *   The memory order to use.
 * @return
 *   Returns true if the bit was set, and false otherwise.
 */
#define rte_bit_atomic_test_and_assign(addr, nr, value, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_assign32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_assign32, \
		uint64_t *: __rte_bit_atomic_test_and_assign64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_assign64) \
			(addr, nr, value, memory_order)
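
/*
 * Usage sketch for the atomic variants (illustrative only): acquiring
 * and releasing a hypothetical busy flag kept in bit 0 of a shared
 * word.
 *
 *	uint32_t flags = 0;
 *
 *	while (rte_bit_atomic_test_and_set(&flags, 0, rte_memory_order_acquire))
 *		;
 *	// ... critical section ...
 *	rte_bit_atomic_clear(&flags, 0, rte_memory_order_release);
 */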

#define __RTE_GEN_BIT_TEST(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	RTE_ASSERT(nr < size); \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	return *addr & mask; \
}

#define __RTE_GEN_BIT_SET(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	RTE_ASSERT(nr < size); \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	*addr |= mask; \
}

#define __RTE_GEN_BIT_CLEAR(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	RTE_ASSERT(nr < size); \
	uint ## size ## _t mask = ~((uint ## size ## _t)1 << nr); \
	(*addr) &= mask; \
}

#define __RTE_GEN_BIT_ASSIGN(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, unsigned int nr, \
		bool value) \
{ \
	if (value) \
		__rte_bit_ ## variant ## set ## size(addr, nr); \
	else \
		__rte_bit_ ## variant ## clear ## size(addr, nr); \
}

#define __RTE_GEN_BIT_FLIP(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	bool value; \
	value = __rte_bit_ ## variant ## test ## size(addr, nr); \
	__rte_bit_ ## variant ## assign ## size(addr, nr, !value); \
}

#define __RTE_GEN_BIT_OPS(v, qualifier, size) \
	__RTE_GEN_BIT_TEST(v, qualifier, size) \
	__RTE_GEN_BIT_SET(v, qualifier, size) \
	__RTE_GEN_BIT_CLEAR(v, qualifier, size) \
	__RTE_GEN_BIT_ASSIGN(v, qualifier, size) \
	__RTE_GEN_BIT_FLIP(v, qualifier, size)

#define __RTE_GEN_BIT_OPS_SIZE(size) \
	__RTE_GEN_BIT_OPS(,, size) \
	__RTE_GEN_BIT_OPS(v_, volatile, size)

__RTE_GEN_BIT_OPS_SIZE(32)
__RTE_GEN_BIT_OPS_SIZE(64)
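
/*
 * For reference, __RTE_GEN_BIT_OPS_SIZE(32) above expands (among
 * others) into a definition roughly equivalent to:
 *
 *	static inline bool
 *	__rte_bit_test32(const uint32_t *addr, unsigned int nr)
 *	{
 *		RTE_ASSERT(nr < 32);
 *		uint32_t mask = (uint32_t)1 << nr;
 *		return *addr & mask;
 *	}
 *
 * plus the corresponding set/clear/assign/flip functions and their
 * volatile-qualified 'v_' variants.
 */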

#define __RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	const qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(const qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	return rte_atomic_load_explicit(a_addr, memory_order) & mask; \
}

#define __RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	rte_atomic_fetch_xor_explicit(a_addr, mask, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, bool value, int memory_order) \
{ \
	if (value) \
		__rte_bit_atomic_ ## variant ## set ## size(addr, nr, memory_order); \
	else \
		__rte_bit_atomic_ ## variant ## clear ## size(addr, nr, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test_and_set ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	uint ## size ## _t prev; \
	prev = rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
	return prev & mask; \
}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test_and_clear ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	uint ## size ## _t prev; \
	prev = rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
	return prev & mask; \
}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test_and_assign ## size( \
		qualifier uint ## size ## _t *addr, unsigned int nr, bool value, \
		int memory_order) \
{ \
	if (value) \
		return __rte_bit_atomic_ ## variant ## test_and_set ## size(addr, nr, \
			memory_order); \
	else \
		return __rte_bit_atomic_ ## variant ## test_and_clear ## size(addr, nr, \
			memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_OPS(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size)

#define __RTE_GEN_BIT_ATOMIC_OPS_SIZE(size) \
	__RTE_GEN_BIT_ATOMIC_OPS(,, size) \
	__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)

__RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
__RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)
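
/*
 * Likewise, __RTE_GEN_BIT_ATOMIC_OPS_SIZE(64) instantiates the whole
 * atomic family; e.g. __rte_bit_atomic_set64() boils down to a
 * rte_atomic_fetch_or_explicit() of the bit mask on the word,
 * reinterpreted as an RTE_ATOMIC(uint64_t).
 */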

/*------------------------ 32-bit relaxed operations ------------------------*/

/**
 * Get the target bit from a 32-bit value without memory ordering.
 *
 * @param nr
 *   The target bit to get.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The target bit.
 */
static inline uint32_t
rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	return (*addr) & mask;
}

/**
 * Set the target bit in a 32-bit value to 1 without memory ordering.
 *
 * @param nr
 *   The target bit to set.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_set32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	*addr = (*addr) | mask;
}

/**
 * Clear the target bit in a 32-bit value to 0 without memory ordering.
 *
 * @param nr
 *   The target bit to clear.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_clear32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	*addr = (*addr) & (~mask);
}

/**
 * Return the original bit from a 32-bit value, then set it to 1 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and set.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint32_t
rte_bit_relaxed_test_and_set32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	uint32_t val = *addr;
	*addr = val | mask;
	return val & mask;
}

/**
 * Return the original bit from a 32-bit value, then clear it to 0 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and clear.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint32_t
rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	uint32_t val = *addr;
	*addr = val & (~mask);
	return val & mask;
}
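
/*
 * Example (illustrative only): maintaining a simple ready mask from a
 * single writer, where no memory-ordering guarantees are needed. Note
 * the (nr, addr) argument order of this legacy API.
 *
 *	volatile uint32_t ready = 0;
 *
 *	rte_bit_relaxed_set32(5, &ready);
 *	if (rte_bit_relaxed_get32(5, &ready))
 *		rte_bit_relaxed_clear32(5, &ready);
 */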

/*------------------------ 64-bit relaxed operations ------------------------*/

/**
 * Get the target bit from a 64-bit value without memory ordering.
 *
 * @param nr
 *   The target bit to get.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The target bit.
 */
static inline uint64_t
rte_bit_relaxed_get64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	return (*addr) & mask;
}

/**
 * Set the target bit in a 64-bit value to 1 without memory ordering.
 *
 * @param nr
 *   The target bit to set.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_set64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	(*addr) = (*addr) | mask;
}

/**
 * Clear the target bit in a 64-bit value to 0 without memory ordering.
 *
 * @param nr
 *   The target bit to clear.
 * @param addr
 *   The address holding the bit.
 */
static inline void
rte_bit_relaxed_clear64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	*addr = (*addr) & (~mask);
}

/**
 * Return the original bit from a 64-bit value, then set it to 1 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and set.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint64_t
rte_bit_relaxed_test_and_set64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	uint64_t val = *addr;
	*addr = val | mask;
	return val & mask;
}

/**
 * Return the original bit from a 64-bit value, then clear it to 0 without
 * memory ordering.
 *
 * @param nr
 *   The target bit to get and clear.
 * @param addr
 *   The address holding the bit.
 * @return
 *   The original bit.
 */
static inline uint64_t
rte_bit_relaxed_test_and_clear64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	uint64_t val = *addr;
	*addr = val & (~mask);
	return val & mask;
}

#ifdef RTE_TOOLCHAIN_MSVC

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz32(uint32_t v)
{
	unsigned long rv;

	(void)_BitScanReverse(&rv, v);

	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
}

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz64(uint64_t v)
{
	unsigned long rv;

	(void)_BitScanReverse64(&rv, v);

	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz32(uint32_t v)
{
	unsigned long rv;

	(void)_BitScanForward(&rv, v);

	return (unsigned int)rv;
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz64(uint64_t v)
{
	unsigned long rv;

	(void)_BitScanForward64(&rv, v);

	return (unsigned int)rv;
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount32(uint32_t v)
{
	return (unsigned int)__popcnt(v);
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount64(uint64_t v)
{
	return (unsigned int)__popcnt64(v);
}

#else

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz32(uint32_t v)
{
	return (unsigned int)__builtin_clz(v);
}

/**
 * Get the count of leading 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of leading zero bits.
 */
static inline unsigned int
rte_clz64(uint64_t v)
{
	return (unsigned int)__builtin_clzll(v);
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz32(uint32_t v)
{
	return (unsigned int)__builtin_ctz(v);
}

/**
 * Get the count of trailing 0-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of trailing zero bits.
 */
static inline unsigned int
rte_ctz64(uint64_t v)
{
	return (unsigned int)__builtin_ctzll(v);
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount32(uint32_t v)
{
	return (unsigned int)__builtin_popcount(v);
}

/**
 * Get the count of 1-bits in v.
 *
 * @param v
 *   The value.
 * @return
 *   The count of 1-bits.
 */
static inline unsigned int
rte_popcount64(uint64_t v)
{
	return (unsigned int)__builtin_popcountll(v);
}

#endif
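
/*
 * For example, for v = UINT32_C(0x00f0):
 *
 *	rte_clz32(v)      == 24  (bits 31..8 are zero above the MSB at bit 7)
 *	rte_ctz32(v)      == 4   (bits 3..0 are zero below the LSB at bit 4)
 *	rte_popcount32(v) == 4
 */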

/**
 * Propagate the most significant set bit of a 32-bit input into every
 * less significant bit position, constructing a value with the same
 * most significant set bit as x but with all 1's under it.
 *
 * @param x
 *    The integer whose MSBs need to be combined with its LSBs
 * @return
 *    The combined value.
 */
static inline uint32_t
rte_combine32ms1b(uint32_t x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;

	return x;
}

/**
 * Propagate the most significant set bit of a 64-bit input into every
 * less significant bit position, constructing a value with the same
 * most significant set bit as v but with all 1's under it.
 *
 * @param v
 *    The integer whose MSBs need to be combined with its LSBs
 * @return
 *    The combined value.
 */
static inline uint64_t
rte_combine64ms1b(uint64_t v)
{
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v |= v >> 32;

	return v;
}
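
/*
 * For example, rte_combine32ms1b(0x00500000) successively ORs in
 * right-shifted copies of the value (by 1, 2, 4, 8 and 16) and yields
 * 0x007fffff: the most significant set bit (bit 22) is preserved and
 * every bit below it becomes 1.
 */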

/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero).
 * If a least significant 1 bit is found, its bit index is returned.
 * If the input parameter is zero, the return value is undefined.
 *
 * @param v
 *     input parameter, should not be zero.
 * @return
 *     least significant set bit in the input parameter.
 */
static inline uint32_t
rte_bsf32(uint32_t v)
{
	return (uint32_t)rte_ctz32(v);
}

/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero). Safe version (checks for input parameter being zero).
 *
 * @warning ``pos`` must be a valid pointer. It is not checked!
 *
 * @param v
 *     The input parameter.
 * @param pos
 *     If ``v`` was not 0, this value will contain the position of the least
 *     significant set bit within the input parameter.
 * @return
 *     Returns 0 if ``v`` was 0, otherwise returns 1.
 */
static inline int
rte_bsf32_safe(uint32_t v, uint32_t *pos)
{
	if (v == 0)
		return 0;

	*pos = rte_bsf32(v);
	return 1;
}

/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero).
 * If a least significant 1 bit is found, its bit index is returned.
 * If the input parameter is zero, the return value is undefined.
 *
 * @param v
 *     input parameter, should not be zero.
 * @return
 *     least significant set bit in the input parameter.
 */
static inline uint32_t
rte_bsf64(uint64_t v)
{
	return (uint32_t)rte_ctz64(v);
}

/**
 * Searches the input parameter for the least significant set bit
 * (starting from zero). Safe version (checks for input parameter being zero).
 *
 * @warning ``pos`` must be a valid pointer. It is not checked!
 *
 * @param v
 *     The input parameter.
 * @param pos
 *     If ``v`` was not 0, this value will contain the position of the least
 *     significant set bit within the input parameter.
 * @return
 *     Returns 0 if ``v`` was 0, otherwise returns 1.
 */
static inline int
rte_bsf64_safe(uint64_t v, uint32_t *pos)
{
	if (v == 0)
		return 0;

	*pos = rte_bsf64(v);
	return 1;
}
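
/*
 * For example (illustrative only):
 *
 *	uint32_t pos;
 *
 *	rte_bsf32(0x18);                            // == 3
 *	if (rte_bsf64_safe(UINT64_C(0x100), &pos))  // returns 1
 *		;                                   // pos == 8
 *	rte_bsf32_safe(0, &pos);                    // returns 0, pos untouched
 */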

/**
 * Return the last (most-significant) bit set.
 *
 * @note The last (most significant) bit is at position 32.
 * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32
 *
 * @param x
 *     The input parameter.
 * @return
 *     The last (most-significant) bit set, or 0 if the input is 0.
 */
static inline uint32_t
rte_fls_u32(uint32_t x)
{
	return (x == 0) ? 0 : 32 - rte_clz32(x);
}

/**
 * Return the last (most-significant) bit set.
 *
 * @note The last (most significant) bit is at position 64.
 * @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1,
 *       rte_fls_u64(0x8000000000000000) = 64
 *
 * @param x
 *     The input parameter.
 * @return
 *     The last (most-significant) bit set, or 0 if the input is 0.
 */
static inline uint32_t
rte_fls_u64(uint64_t x)
{
	return (x == 0) ? 0 : 64 - rte_clz64(x);
}
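
/*
 * Note the 1-based convention relative to rte_bsf32(): for a non-zero
 * v, rte_fls_u32(v) is the 1-based index of the most significant set
 * bit, e.g. rte_fls_u32(0x18) == 5, whereas rte_bsf32(0x18) == 3.
 */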

/*********** Macros to work with powers of 2 ********/

/**
 * Macro to return 1 if n is a power of 2, 0 otherwise
 */
#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n)))

/**
 * Returns true if n is a power of 2
 * @param n
 *     Number to check
 * @return 1 if true, 0 otherwise
 */
static inline int
rte_is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

/**
 * Aligns input parameter to the next power of 2
 *
 * @param x
 *   The integer value to align
 *
 * @return
 *   Input parameter aligned to the next power of 2
 */
static inline uint32_t
rte_align32pow2(uint32_t x)
{
	x--;
	x = rte_combine32ms1b(x);

	return x + 1;
}

/**
 * Aligns input parameter to the previous power of 2
 *
 * @param x
 *   The integer value to align
 *
 * @return
 *   Input parameter aligned to the previous power of 2
 */
static inline uint32_t
rte_align32prevpow2(uint32_t x)
{
	x = rte_combine32ms1b(x);

	return x - (x >> 1);
}

/**
 * Aligns 64b input parameter to the next power of 2
 *
 * @param v
 *   The 64b value to align
 *
 * @return
 *   Input parameter aligned to the next power of 2
 */
static inline uint64_t
rte_align64pow2(uint64_t v)
{
	v--;
	v = rte_combine64ms1b(v);

	return v + 1;
}

/**
 * Aligns 64b input parameter to the previous power of 2
 *
 * @param v
 *   The 64b value to align
 *
 * @return
 *   Input parameter aligned to the previous power of 2
 */
static inline uint64_t
rte_align64prevpow2(uint64_t v)
{
	v = rte_combine64ms1b(v);

	return v - (v >> 1);
}
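
/*
 * For example:
 *
 *	rte_align32pow2(33)     == 64
 *	rte_align32pow2(32)     == 32  (powers of two map to themselves)
 *	rte_align32prevpow2(33) == 32
 *
 * Note that rte_align32pow2(0) wraps around to 0.
 */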

/**
 * Return the rounded-up log2 of a 32-bit integer.
 *
 * @note Contrary to the logarithm mathematical operation,
 * rte_log2_u32(0) == 0 and not -inf.
 *
 * @param v
 *     The input parameter.
 * @return
 *     The rounded-up log2 of the input, or 0 if the input is 0.
 */
static inline uint32_t
rte_log2_u32(uint32_t v)
{
	if (v == 0)
		return 0;
	v = rte_align32pow2(v);
	return rte_bsf32(v);
}

/**
 * Return the rounded-up log2 of a 64-bit integer.
 *
 * @note Contrary to the logarithm mathematical operation,
 * rte_log2_u64(0) == 0 and not -inf.
 *
 * @param v
 *     The input parameter.
 * @return
 *     The rounded-up log2 of the input, or 0 if the input is 0.
 */
static inline uint32_t
rte_log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;
	v = rte_align64pow2(v);
	/* we checked for v being 0 already, so no undefined behavior */
	return rte_bsf64(v);
}
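
/*
 * For example, rte_log2_u32(1) == 0, rte_log2_u32(4) == 2 and
 * rte_log2_u32(5) == 3 (rounded up), matching
 * rte_bsf32(rte_align32pow2(v)).
 */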

#ifdef __cplusplus
}

/*
 * Since C++ doesn't support generic selection (i.e., _Generic),
 * function overloading is used instead. Such functions must be
 * defined outside 'extern "C"' to be accepted by the compiler.
 */

#undef rte_bit_test
#undef rte_bit_set
#undef rte_bit_clear
#undef rte_bit_assign
#undef rte_bit_flip

#undef rte_bit_atomic_test
#undef rte_bit_atomic_set
#undef rte_bit_atomic_clear
#undef rte_bit_atomic_assign
#undef rte_bit_atomic_flip
#undef rte_bit_atomic_test_and_set
#undef rte_bit_atomic_test_and_clear
#undef rte_bit_atomic_test_and_assign

#define __RTE_BIT_OVERLOAD_V_2(family, v, fun, qualifier, size, arg1_type, arg1_name) \
static inline void \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
{ \
	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, size, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2(family,, fun, qualifier, size, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_2(family, fun, qualifier, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 32, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 64, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_V_2R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
static inline ret_type \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
{ \
	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2R(family, v_, fun, qualifier volatile, size, ret_type, arg1_type, \
		arg1_name)

#define __RTE_BIT_OVERLOAD_2R(family, fun, qualifier, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_V_3(family, v, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
static inline void \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name) \
{ \
	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3(family,, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_3(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 32, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 64, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_V_3R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
static inline ret_type \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name) \
{ \
	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3R(family, v_, fun, qualifier volatile, size, ret_type, \
		arg1_type, arg1_name, arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_3R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_V_4(family, v, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
static inline void \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name, arg3_type arg3_name) \
{ \
	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, arg3_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4(family,, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_4(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name, \
		arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 32, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 64, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_V_4R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
static inline ret_type \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name, arg3_type arg3_name) \
{ \
	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, \
		arg3_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4R(family, v_, fun, qualifier volatile, size, ret_type, \
		arg1_type, arg1_name, arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_4R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

__RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr)
__RTE_BIT_OVERLOAD_2(, set,, unsigned int, nr)
__RTE_BIT_OVERLOAD_2(, clear,, unsigned int, nr)
__RTE_BIT_OVERLOAD_3(, assign,, unsigned int, nr, bool, value)
__RTE_BIT_OVERLOAD_2(, flip,, unsigned int, nr)

__RTE_BIT_OVERLOAD_3R(atomic_, test, const, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, set,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, clear,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_4(atomic_, assign,, unsigned int, nr, bool, value, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, flip,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3R(atomic_, test_and_set,, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3R(atomic_, test_and_clear,, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_4R(atomic_, test_and_assign,, bool, unsigned int, nr, bool, value,
	int, memory_order)
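
/*
 * With these overloads in place, C++ call sites look the same as the C
 * ones, e.g. (illustrative only):
 *
 *	uint64_t word = 0;
 *
 *	rte_bit_set(&word, 1);
 *	bool prev = rte_bit_atomic_test_and_clear(&word, 1,
 *		rte_memory_order_relaxed);
 */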

#endif

#endif /* _RTE_BITOPS_H_ */