1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Arm Limited
3  * Copyright(c) 2010-2019 Intel Corporation
4  * Copyright(c) 2023 Microsoft Corporation
5  * Copyright(c) 2024 Ericsson AB
6  */
7 
8 #ifndef _RTE_BITOPS_H_
9 #define _RTE_BITOPS_H_
10 
11 /**
12  * @file
13  * Bit Operations
14  *
15  * This file provides functionality for low-level, single-word
16  * arithmetic and bit-level operations, such as counting or
17  * setting individual bits.
18  */
19 
20 #include <stdint.h>
#include <limits.h> /* CHAR_BIT, used by the MSVC count helpers below */
21 
22 #include <rte_compat.h>
23 #include <rte_debug.h>
24 #include <rte_stdatomic.h>
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
30 /**
31  * Get the uint64_t value for a specified bit set.
32  *
33  * @param nr
34  *   The bit number in range of 0 to 63.
35  */
36 #define RTE_BIT64(nr) (UINT64_C(1) << (nr))
37 
38 /**
39  * Get the uint32_t value for a specified bit set.
40  *
41  * @param nr
42  *   The bit number in range of 0 to 31.
43  */
44 #define RTE_BIT32(nr) (UINT32_C(1) << (nr))
45 
46 /**
47  * Get the uint32_t shifted value.
48  *
49  * @param val
50  *   The value to be shifted.
51  * @param nr
52  *   The shift number in range of 0 to (32 - width of val).
53  */
54 #define RTE_SHIFT_VAL32(val, nr) (UINT32_C(val) << (nr))
55 
56 /**
57  * Get the uint64_t shifted value.
58  *
59  * @param val
60  *   The value to be shifted.
61  * @param nr
62  *   The shift number in range of 0 to (64 - width of val).
63  */
64 #define RTE_SHIFT_VAL64(val, nr) (UINT64_C(val) << (nr))
65 
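/*
 * A minimal usage sketch of the macros above (the register and field
 * layout here is hypothetical, for illustration only):
 *
 *	uint32_t ctrl = 0;
 *
 *	ctrl |= RTE_BIT32(0);            // enable flag in bit 0
 *	ctrl |= RTE_BIT32(7);            // interrupt-mask flag in bit 7
 *	ctrl |= RTE_SHIFT_VAL32(0x3, 8); // 2-bit mode field at bits 8-9
 *
 *	uint64_t msb = RTE_BIT64(63);    // most significant bit of a 64-bit word
 */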
66 /**
67  * Generate a contiguous 32-bit mask
68  * starting at bit position low and ending at position high.
69  *
70  * @param high
71  *   High bit position. Must satisfy low <= high <= 31.
72  * @param low
73  *   Low bit position.
74  */
75 #define RTE_GENMASK32(high, low) \
76 		(((~UINT32_C(0)) << (low)) & (~UINT32_C(0) >> (31u - (high))))
77 
78 /**
79  * Generate a contiguous 64-bit mask
80  * starting at bit position low and ending at position high.
81  *
82  * @param high
83  *   High bit position. Must satisfy low <= high <= 63.
84  * @param low
85  *   Low bit position.
86  */
87 #define RTE_GENMASK64(high, low) \
88 		(((~UINT64_C(0)) << (low)) & (~UINT64_C(0) >> (63u - (high))))
89 
90 /**
91  * Extract a 32-bit field element.
92  *
93  * @param mask
94  *   Shifted mask.
95  * @param reg
96  *   Value of entire bitfield.
97  */
98 #define RTE_FIELD_GET32(mask, reg) \
99 		((typeof(mask))(((reg) & (mask)) >> rte_ctz32(mask)))
100 
101 /**
102  * Extract a 64-bit field element.
103  *
104  * @param mask
105  *   Shifted mask.
106  * @param reg
107  *   Value of entire bitfield.
108  */
109 #define RTE_FIELD_GET64(mask, reg) \
110 		((typeof(mask))(((reg) & (mask)) >> rte_ctz64(mask)))
111 
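/*
 * A minimal usage sketch combining RTE_GENMASK32() and RTE_FIELD_GET32()
 * (the register layout is hypothetical):
 *
 *	// 4-bit version field occupying bits 8-11
 *	#define VERSION_MASK RTE_GENMASK32(11, 8)    // 0x00000f00
 *
 *	uint32_t reg = 0x00000a55;
 *	uint32_t version = RTE_FIELD_GET32(VERSION_MASK, reg); // 0xa
 */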
112 /**
113  * @warning
114  * @b EXPERIMENTAL: this API may change without prior notice.
115  *
116  * Test bit in word.
117  *
118  * Generic selection macro to test the value of a bit in a 32-bit or
119  * 64-bit word. The type of operation depends on the type of the @c
120  * addr parameter.
121  *
122  * This macro does not give any guarantees in regards to memory
123  * ordering or atomicity.
124  *
125  * @param addr
126  *   A pointer to the word to query.
127  * @param nr
128  *   The index of the bit.
129  */
130 #define rte_bit_test(addr, nr) \
131 	_Generic((addr), \
132 		uint32_t *: __rte_bit_test32, \
133 		const uint32_t *: __rte_bit_test32, \
134 		volatile uint32_t *: __rte_bit_v_test32, \
135 		const volatile uint32_t *: __rte_bit_v_test32, \
136 		uint64_t *: __rte_bit_test64, \
137 		const uint64_t *: __rte_bit_test64, \
138 		volatile uint64_t *: __rte_bit_v_test64, \
139 		const volatile uint64_t *: __rte_bit_v_test64) \
140 			(addr, nr)
141 
142 /**
143  * @warning
144  * @b EXPERIMENTAL: this API may change without prior notice.
145  *
146  * Set bit in word.
147  *
148  * Generic selection macro to set a bit in a 32-bit or 64-bit
149  * word. The type of operation depends on the type of the @c addr
150  * parameter.
151  *
152  * This macro does not give any guarantees in regards to memory
153  * ordering or atomicity.
154  *
155  * @param addr
156  *   A pointer to the word to modify.
157  * @param nr
158  *   The index of the bit.
159  */
160 #define rte_bit_set(addr, nr) \
161 	_Generic((addr), \
162 		uint32_t *: __rte_bit_set32, \
163 		volatile uint32_t *: __rte_bit_v_set32, \
164 		uint64_t *: __rte_bit_set64, \
165 		volatile uint64_t *: __rte_bit_v_set64) \
166 			(addr, nr)
167 
168 /**
169  * @warning
170  * @b EXPERIMENTAL: this API may change without prior notice.
171  *
172  * Clear bit in word.
173  *
174  * Generic selection macro to clear a bit in a 32-bit or 64-bit
175  * word. The type of operation depends on the type of the @c addr
176  * parameter.
177  *
178  * This macro does not give any guarantees in regards to memory
179  * ordering or atomicity.
180  *
181  * @param addr
182  *   A pointer to the word to modify.
183  * @param nr
184  *   The index of the bit.
185  */
186 #define rte_bit_clear(addr, nr) \
187 	_Generic((addr), \
188 		uint32_t *: __rte_bit_clear32, \
189 		volatile uint32_t *: __rte_bit_v_clear32, \
190 		uint64_t *: __rte_bit_clear64, \
191 		volatile uint64_t *: __rte_bit_v_clear64) \
192 			(addr, nr)
193 
194 /**
195  * @warning
196  * @b EXPERIMENTAL: this API may change without prior notice.
197  *
198  * Assign a value to a bit in word.
199  *
200  * Generic selection macro to assign a value to a bit in a 32-bit or 64-bit
201  * word. The type of operation depends on the type of the @c addr parameter.
202  *
203  * This macro does not give any guarantees in regards to memory
204  * ordering or atomicity.
205  *
206  * @param addr
207  *   A pointer to the word to modify.
208  * @param nr
209  *   The index of the bit.
210  * @param value
211  *   The new value of the bit - true for '1', or false for '0'.
212  */
213 #define rte_bit_assign(addr, nr, value) \
214 	_Generic((addr), \
215 		uint32_t *: __rte_bit_assign32, \
216 		volatile uint32_t *: __rte_bit_v_assign32, \
217 		uint64_t *: __rte_bit_assign64, \
218 		volatile uint64_t *: __rte_bit_v_assign64) \
219 			(addr, nr, value)
220 
221 /**
222  * @warning
223  * @b EXPERIMENTAL: this API may change without prior notice.
224  *
225  * Flip a bit in word.
226  *
227  * Generic selection macro to flip (toggle) a bit in a 32-bit or
228  * 64-bit word, changing '0' to '1' and '1' to '0'. The type of
229  * operation depends on the type of the @c addr parameter.
230  *
231  * This macro does not give any guarantees in regards to memory
232  * ordering or atomicity.
233  *
234  * @param addr
235  *   A pointer to the word to modify.
236  * @param nr
237  *   The index of the bit.
238  */
239 #define rte_bit_flip(addr, nr) \
240 	_Generic((addr), \
241 		uint32_t *: __rte_bit_flip32, \
242 		volatile uint32_t *: __rte_bit_v_flip32, \
243 		uint64_t *: __rte_bit_flip64, \
244 		volatile uint64_t *: __rte_bit_v_flip64) \
245 			(addr, nr)
246 
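/*
 * A minimal usage sketch of the non-atomic generic macros above; the
 * _Generic dispatch selects the 32- or 64-bit handler from the pointer
 * type. Requires ALLOW_EXPERIMENTAL_API to be defined.
 *
 *	uint64_t word = 0;
 *
 *	rte_bit_set(&word, 2);           // word == 0x4
 *	rte_bit_assign(&word, 40, true); // bit 40 also set
 *	rte_bit_flip(&word, 2);          // bit 2 cleared again
 *	if (rte_bit_test(&word, 40))
 *		rte_bit_clear(&word, 40);
 */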
247 /**
248  * @warning
249  * @b EXPERIMENTAL: this API may change without prior notice.
250  *
251  * Test if a particular bit in a word is set with a particular memory
252  * order.
253  *
254  * Test a bit with the resulting memory load ordered as per the
255  * specified memory order.
256  *
257  * @param addr
258  *   A pointer to the word to query.
259  * @param nr
260  *   The index of the bit.
261  * @param memory_order
262  *   The memory order to use.
263  * @return
264  *   Returns true if the bit is set, and false otherwise.
265  */
266 #define rte_bit_atomic_test(addr, nr, memory_order) \
267 	_Generic((addr), \
268 		uint32_t *: __rte_bit_atomic_test32, \
269 		const uint32_t *: __rte_bit_atomic_test32, \
270 		volatile uint32_t *: __rte_bit_atomic_v_test32, \
271 		const volatile uint32_t *: __rte_bit_atomic_v_test32, \
272 		uint64_t *: __rte_bit_atomic_test64, \
273 		const uint64_t *: __rte_bit_atomic_test64, \
274 		volatile uint64_t *: __rte_bit_atomic_v_test64, \
275 		const volatile uint64_t *: __rte_bit_atomic_v_test64) \
276 			(addr, nr, memory_order)
277 
278 /**
279  * @warning
280  * @b EXPERIMENTAL: this API may change without prior notice.
281  *
282  * Atomically set bit in word.
283  *
284  * Generic selection macro to atomically set bit specified by @c nr in
285  * the word pointed to by @c addr to '1', with the memory ordering as
286  * specified by @c memory_order.
287  *
288  * @param addr
289  *   A pointer to the word to modify.
290  * @param nr
291  *   The index of the bit.
292  * @param memory_order
293  *   The memory order to use.
294  */
295 #define rte_bit_atomic_set(addr, nr, memory_order) \
296 	_Generic((addr), \
297 		uint32_t *: __rte_bit_atomic_set32, \
298 		volatile uint32_t *: __rte_bit_atomic_v_set32, \
299 		uint64_t *: __rte_bit_atomic_set64, \
300 		volatile uint64_t *: __rte_bit_atomic_v_set64) \
301 			(addr, nr, memory_order)
302 
303 /**
304  * @warning
305  * @b EXPERIMENTAL: this API may change without prior notice.
306  *
307  * Atomically clear bit in word.
308  *
309  * Generic selection macro to atomically set bit specified by @c nr in
310  * the word pointed to by @c addr to '0', with the memory ordering as
311  * specified by @c memory_order.
312  *
313  * @param addr
314  *   A pointer to the word to modify.
315  * @param nr
316  *   The index of the bit.
317  * @param memory_order
318  *   The memory order to use.
319  */
320 #define rte_bit_atomic_clear(addr, nr, memory_order) \
321 	_Generic((addr), \
322 		uint32_t *: __rte_bit_atomic_clear32, \
323 		volatile uint32_t *: __rte_bit_atomic_v_clear32, \
324 		uint64_t *: __rte_bit_atomic_clear64, \
325 		volatile uint64_t *: __rte_bit_atomic_v_clear64) \
326 			(addr, nr, memory_order)
327 
328 /**
329  * @warning
330  * @b EXPERIMENTAL: this API may change without prior notice.
331  *
332  * Atomically assign a value to bit in word.
333  *
334  * Generic selection macro to atomically set bit specified by @c nr in the
335  * word pointed to by @c addr to the value indicated by @c value, with
336  * the memory ordering as specified with @c memory_order.
337  *
338  * @param addr
339  *   A pointer to the word to modify.
340  * @param nr
341  *   The index of the bit.
342  * @param value
343  *   The new value of the bit - true for '1', or false for '0'.
344  * @param memory_order
345  *   The memory order to use.
346  */
347 #define rte_bit_atomic_assign(addr, nr, value, memory_order) \
348 	_Generic((addr), \
349 		uint32_t *: __rte_bit_atomic_assign32, \
350 		volatile uint32_t *: __rte_bit_atomic_v_assign32, \
351 		uint64_t *: __rte_bit_atomic_assign64, \
352 		volatile uint64_t *: __rte_bit_atomic_v_assign64) \
353 			(addr, nr, value, memory_order)
354 
355 /**
356  * @warning
357  * @b EXPERIMENTAL: this API may change without prior notice.
358  *
359  * Atomically flip bit in word.
360  *
361  * Generic selection macro to atomically negate the value of the bit
362  * specified by @c nr in the word pointed to by @c addr (changing
363  * '0' to '1' and vice versa), with the memory ordering as specified
364  * with @c memory_order.
365  *
366  * @param addr
367  *   A pointer to the word to modify.
368  * @param nr
369  *   The index of the bit.
370  * @param memory_order
371  *   The memory order to use.
372  */
373 #define rte_bit_atomic_flip(addr, nr, memory_order) \
374 	_Generic((addr), \
375 		uint32_t *: __rte_bit_atomic_flip32, \
376 		volatile uint32_t *: __rte_bit_atomic_v_flip32, \
377 		uint64_t *: __rte_bit_atomic_flip64, \
378 		volatile uint64_t *: __rte_bit_atomic_v_flip64) \
379 			(addr, nr, memory_order)
380 
381 /**
382  * @warning
383  * @b EXPERIMENTAL: this API may change without prior notice.
384  *
385  * Atomically test and set a bit in word.
386  *
387  * Generic selection macro to atomically test and set bit specified by
388  * @c nr in the word pointed to by @c addr to '1', with the memory
389  * ordering as specified with @c memory_order.
390  *
391  * @param addr
392  *   A pointer to the word to modify.
393  * @param nr
394  *   The index of the bit.
395  * @param memory_order
396  *   The memory order to use.
397  * @return
398  *   Returns true if the bit was previously set, and false otherwise.
399  */
400 #define rte_bit_atomic_test_and_set(addr, nr, memory_order) \
401 	_Generic((addr), \
402 		uint32_t *: __rte_bit_atomic_test_and_set32, \
403 		volatile uint32_t *: __rte_bit_atomic_v_test_and_set32, \
404 		uint64_t *: __rte_bit_atomic_test_and_set64, \
405 		volatile uint64_t *: __rte_bit_atomic_v_test_and_set64) \
406 			(addr, nr, memory_order)
407 
408 /**
409  * @warning
410  * @b EXPERIMENTAL: this API may change without prior notice.
411  *
412  * Atomically test and clear a bit in word.
413  *
414  * Generic selection macro to atomically test and clear bit specified
415  * by @c nr in the word pointed to by @c addr to '0', with the memory
416  * ordering as specified with @c memory_order.
417  *
418  * @param addr
419  *   A pointer to the word to modify.
420  * @param nr
421  *   The index of the bit.
422  * @param memory_order
423  *   The memory order to use.
424  * @return
425  *   Returns true if the bit was previously set, and false otherwise.
426  */
427 #define rte_bit_atomic_test_and_clear(addr, nr, memory_order) \
428 	_Generic((addr), \
429 		uint32_t *: __rte_bit_atomic_test_and_clear32, \
430 		volatile uint32_t *: __rte_bit_atomic_v_test_and_clear32, \
431 		uint64_t *: __rte_bit_atomic_test_and_clear64, \
432 		volatile uint64_t *: __rte_bit_atomic_v_test_and_clear64) \
433 			(addr, nr, memory_order)
434 
435 /**
436  * @warning
437  * @b EXPERIMENTAL: this API may change without prior notice.
438  *
439  * Atomically test and assign a bit in word.
440  *
441  * Generic selection macro to atomically test and assign the bit
442  * specified by @c nr in the word pointed to by @c addr to the value
443  * specified by @c value, with the memory ordering as specified with
444  * @c memory_order.
445  *
446  * @param addr
447  *   A pointer to the word to modify.
448  * @param nr
449  *   The index of the bit.
450  * @param value
451  *   The new value of the bit - true for '1', or false for '0'.
452  * @param memory_order
453  *   The memory order to use.
454  * @return
455  *   Returns true if the bit was previously set, and false otherwise.
456  */
457 #define rte_bit_atomic_test_and_assign(addr, nr, value, memory_order) \
458 	_Generic((addr), \
459 		uint32_t *: __rte_bit_atomic_test_and_assign32, \
460 		volatile uint32_t *: __rte_bit_atomic_v_test_and_assign32, \
461 		uint64_t *: __rte_bit_atomic_test_and_assign64, \
462 		volatile uint64_t *: __rte_bit_atomic_v_test_and_assign64) \
463 			(addr, nr, value, memory_order)
464 
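/*
 * A minimal sketch of a busy-wait flag built on the atomic macros above.
 * This only illustrates the memory_order parameters; real locking should
 * use the dedicated DPDK lock APIs. rte_pause() is assumed to come from
 * <rte_pause.h>, which this header does not include.
 *
 *	static uint32_t flags;
 *
 *	while (rte_bit_atomic_test_and_set(&flags, 0, rte_memory_order_acquire))
 *		rte_pause();
 *	// ... critical section ...
 *	rte_bit_atomic_clear(&flags, 0, rte_memory_order_release);
 */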
465 #define __RTE_GEN_BIT_TEST(variant, qualifier, size) \
466 __rte_experimental \
467 static inline bool \
468 __rte_bit_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, unsigned int nr) \
469 { \
470 	RTE_ASSERT(nr < size); \
471 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
472 	return *addr & mask; \
473 }
474 
475 #define __RTE_GEN_BIT_SET(variant, qualifier, size) \
476 __rte_experimental \
477 static inline void \
478 __rte_bit_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
479 { \
480 	RTE_ASSERT(nr < size); \
481 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
482 	*addr |= mask; \
483 }
484 
485 #define __RTE_GEN_BIT_CLEAR(variant, qualifier, size) \
486 __rte_experimental \
487 static inline void \
488 __rte_bit_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
489 { \
490 	RTE_ASSERT(nr < size); \
491 	uint ## size ## _t mask = ~((uint ## size ## _t)1 << nr); \
492 	(*addr) &= mask; \
493 }
494 
495 #define __RTE_GEN_BIT_ASSIGN(variant, qualifier, size) \
496 __rte_experimental \
497 static inline void \
498 __rte_bit_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, unsigned int nr, \
499 		bool value) \
500 { \
501 	if (value) \
502 		__rte_bit_ ## variant ## set ## size(addr, nr); \
503 	else \
504 		__rte_bit_ ## variant ## clear ## size(addr, nr); \
505 }
506 
507 #define __RTE_GEN_BIT_FLIP(variant, qualifier, size) \
508 __rte_experimental \
509 static inline void \
510 __rte_bit_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
511 { \
512 	bool value; \
513 	value = __rte_bit_ ## variant ## test ## size(addr, nr); \
514 	__rte_bit_ ## variant ## assign ## size(addr, nr, !value); \
515 }
516 
517 #define __RTE_GEN_BIT_OPS(v, qualifier, size) \
518 	__RTE_GEN_BIT_TEST(v, qualifier, size) \
519 	__RTE_GEN_BIT_SET(v, qualifier, size) \
520 	__RTE_GEN_BIT_CLEAR(v, qualifier, size) \
521 	__RTE_GEN_BIT_ASSIGN(v, qualifier, size) \
522 	__RTE_GEN_BIT_FLIP(v, qualifier, size)
523 
524 #define __RTE_GEN_BIT_OPS_SIZE(size) \
525 	__RTE_GEN_BIT_OPS(,, size) \
526 	__RTE_GEN_BIT_OPS(v_, volatile, size)
527 
528 #ifdef ALLOW_EXPERIMENTAL_API
529 __RTE_GEN_BIT_OPS_SIZE(32)
530 __RTE_GEN_BIT_OPS_SIZE(64)
531 #endif
532 
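/*
 * For reference, __RTE_GEN_BIT_OPS_SIZE(32) above expands to the plain
 * handlers __rte_bit_test32, __rte_bit_set32, __rte_bit_clear32,
 * __rte_bit_assign32 and __rte_bit_flip32 plus their volatile-qualified
 * __rte_bit_v_*32 twins, i.e. the functions the _Generic macros earlier
 * in this file dispatch to. The generated setter, for example, is
 * equivalent to:
 *
 *	static inline void
 *	__rte_bit_set32(uint32_t *addr, unsigned int nr)
 *	{
 *		RTE_ASSERT(nr < 32);
 *		*addr |= (uint32_t)1 << nr;
 *	}
 */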
533 #define __RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
534 __rte_experimental \
535 static inline bool \
536 __rte_bit_atomic_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, \
537 		unsigned int nr, int memory_order) \
538 { \
539 	RTE_ASSERT(nr < size); \
540 	const qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
541 		(const qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
542 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
543 	return rte_atomic_load_explicit(a_addr, memory_order) & mask; \
544 }
545 
546 #define __RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
547 __rte_experimental \
548 static inline void \
549 __rte_bit_atomic_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, \
550 		unsigned int nr, int memory_order) \
551 { \
552 	RTE_ASSERT(nr < size); \
553 	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
554 		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
555 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
556 	rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
557 }
558 
559 #define __RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
560 __rte_experimental \
561 static inline void \
562 __rte_bit_atomic_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, \
563 		unsigned int nr, int memory_order) \
564 { \
565 	RTE_ASSERT(nr < size); \
566 	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
567 		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
568 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
569 	rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
570 }
571 
572 #define __RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size) \
573 __rte_experimental \
574 static inline void \
575 __rte_bit_atomic_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, \
576 		unsigned int nr, int memory_order) \
577 { \
578 	RTE_ASSERT(nr < size); \
579 	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
580 		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
581 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
582 	rte_atomic_fetch_xor_explicit(a_addr, mask, memory_order); \
583 }
584 
585 #define __RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
586 __rte_experimental \
587 static inline void \
588 __rte_bit_atomic_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, \
589 		unsigned int nr, bool value, int memory_order) \
590 { \
591 	if (value) \
592 		__rte_bit_atomic_ ## variant ## set ## size(addr, nr, memory_order); \
593 	else \
594 		__rte_bit_atomic_ ## variant ## clear ## size(addr, nr, memory_order); \
595 }
596 
597 #define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
598 __rte_experimental \
599 static inline bool \
600 __rte_bit_atomic_ ## variant ## test_and_set ## size(qualifier uint ## size ## _t *addr, \
601 		unsigned int nr, int memory_order) \
602 { \
603 	RTE_ASSERT(nr < size); \
604 	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
605 		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
606 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
607 	uint ## size ## _t prev; \
608 	prev = rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
609 	return prev & mask; \
610 }
611 
612 #define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
613 __rte_experimental \
614 static inline bool \
615 __rte_bit_atomic_ ## variant ## test_and_clear ## size(qualifier uint ## size ## _t *addr, \
616 		unsigned int nr, int memory_order) \
617 { \
618 	RTE_ASSERT(nr < size); \
619 	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
620 		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
621 	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
622 	uint ## size ## _t prev; \
623 	prev = rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
624 	return prev & mask; \
625 }
626 
627 #define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
628 __rte_experimental \
629 static inline bool \
630 __rte_bit_atomic_ ## variant ## test_and_assign ## size( \
631 		qualifier uint ## size ## _t *addr, unsigned int nr, bool value, \
632 		int memory_order) \
633 { \
634 	if (value) \
635 		return __rte_bit_atomic_ ## variant ## test_and_set ## size(addr, nr, \
636 			memory_order); \
637 	else \
638 		return __rte_bit_atomic_ ## variant ## test_and_clear ## size(addr, nr, \
639 			memory_order); \
640 }
641 
642 #define __RTE_GEN_BIT_ATOMIC_OPS(variant, qualifier, size) \
643 	__RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
644 	__RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
645 	__RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
646 	__RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
647 	__RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
648 	__RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
649 	__RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
650 	__RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size)
651 
652 #define __RTE_GEN_BIT_ATOMIC_OPS_SIZE(size) \
653 	__RTE_GEN_BIT_ATOMIC_OPS(,, size) \
654 	__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)
655 
656 #ifdef ALLOW_EXPERIMENTAL_API
657 __RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
658 __RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)
659 #endif
660 
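/*
 * For reference, the atomic generators above build all operations on the
 * rte_atomic_fetch_{or,and,xor}_explicit() primitives. The generated
 * 32-bit test-and-set, for example, is equivalent to:
 *
 *	static inline bool
 *	__rte_bit_atomic_test_and_set32(uint32_t *addr, unsigned int nr,
 *			int memory_order)
 *	{
 *		RTE_ATOMIC(uint32_t) *a_addr = (RTE_ATOMIC(uint32_t) *)addr;
 *		uint32_t mask = (uint32_t)1 << nr;
 *
 *		return rte_atomic_fetch_or_explicit(a_addr, mask, memory_order) & mask;
 *	}
 */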
661 /*------------------------ 32-bit relaxed operations ------------------------*/
662 
663 /**
664  * Get the target bit from a 32-bit value without memory ordering.
665  *
666  * @param nr
667  *   The target bit to get.
668  * @param addr
669  *   The address holding the bit.
670  * @return
671  *   The target bit.
672  */
673 static inline uint32_t
674 rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t *addr)
675 {
676 	RTE_ASSERT(nr < 32);
677 
678 	uint32_t mask = RTE_BIT32(nr);
679 	return (*addr) & mask;
680 }
681 
682 /**
683  * Set the target bit in a 32-bit value to 1 without memory ordering.
684  *
685  * @param nr
686  *   The target bit to set.
687  * @param addr
688  *   The address holding the bit.
689  */
690 static inline void
691 rte_bit_relaxed_set32(unsigned int nr, volatile uint32_t *addr)
692 {
693 	RTE_ASSERT(nr < 32);
694 
695 	uint32_t mask = RTE_BIT32(nr);
696 	*addr = (*addr) | mask;
697 }
698 
699 /**
700  * Clear the target bit in a 32-bit value to 0 without memory ordering.
701  *
702  * @param nr
703  *   The target bit to clear.
704  * @param addr
705  *   The address holding the bit.
706  */
707 static inline void
708 rte_bit_relaxed_clear32(unsigned int nr, volatile uint32_t *addr)
709 {
710 	RTE_ASSERT(nr < 32);
711 
712 	uint32_t mask = RTE_BIT32(nr);
713 	*addr = (*addr) & (~mask);
714 }
715 
716 /**
717  * Return the original bit from a 32-bit value, then set it to 1 without
718  * memory ordering.
719  *
720  * @param nr
721  *   The target bit to get and set.
722  * @param addr
723  *   The address holding the bit.
724  * @return
725  *   The original bit.
726  */
727 static inline uint32_t
728 rte_bit_relaxed_test_and_set32(unsigned int nr, volatile uint32_t *addr)
729 {
730 	RTE_ASSERT(nr < 32);
731 
732 	uint32_t mask = RTE_BIT32(nr);
733 	uint32_t val = *addr;
734 	*addr = val | mask;
735 	return val & mask;
736 }
737 
738 /**
739  * Return the original bit from a 32-bit value, then clear it to 0 without
740  * memory ordering.
741  *
742  * @param nr
743  *   The target bit to get and clear.
744  * @param addr
745  *   The address holding the bit.
746  * @return
747  *   The original bit.
748  */
749 static inline uint32_t
750 rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile uint32_t *addr)
751 {
752 	RTE_ASSERT(nr < 32);
753 
754 	uint32_t mask = RTE_BIT32(nr);
755 	uint32_t val = *addr;
756 	*addr = val & (~mask);
757 	return val & mask;
758 }
759 
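/*
 * A minimal usage sketch of the relaxed helpers: claiming a slot in a
 * 32-entry bitmap. Since these provide no atomicity or ordering, a
 * single-writer context is assumed; the names are hypothetical.
 *
 *	volatile uint32_t used_slots = 0;
 *	unsigned int slot = 3;
 *
 *	if (!rte_bit_relaxed_test_and_set32(slot, &used_slots)) {
 *		// slot was free and is now marked as used
 *	}
 *	rte_bit_relaxed_clear32(slot, &used_slots); // release the slot
 */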
760 /*------------------------ 64-bit relaxed operations ------------------------*/
761 
762 /**
763  * Get the target bit from a 64-bit value without memory ordering.
764  *
765  * @param nr
766  *   The target bit to get.
767  * @param addr
768  *   The address holding the bit.
769  * @return
770  *   The target bit.
771  */
772 static inline uint64_t
773 rte_bit_relaxed_get64(unsigned int nr, volatile uint64_t *addr)
774 {
775 	RTE_ASSERT(nr < 64);
776 
777 	uint64_t mask = RTE_BIT64(nr);
778 	return (*addr) & mask;
779 }
780 
781 /**
782  * Set the target bit in a 64-bit value to 1 without memory ordering.
783  *
784  * @param nr
785  *   The target bit to set.
786  * @param addr
787  *   The address holding the bit.
788  */
789 static inline void
790 rte_bit_relaxed_set64(unsigned int nr, volatile uint64_t *addr)
791 {
792 	RTE_ASSERT(nr < 64);
793 
794 	uint64_t mask = RTE_BIT64(nr);
795 	(*addr) = (*addr) | mask;
796 }
797 
798 /**
799  * Clear the target bit in a 64-bit value to 0 without memory ordering.
800  *
801  * @param nr
802  *   The target bit to clear.
803  * @param addr
804  *   The address holding the bit.
805  */
806 static inline void
807 rte_bit_relaxed_clear64(unsigned int nr, volatile uint64_t *addr)
808 {
809 	RTE_ASSERT(nr < 64);
810 
811 	uint64_t mask = RTE_BIT64(nr);
812 	*addr = (*addr) & (~mask);
813 }
814 
815 /**
816  * Return the original bit from a 64-bit value, then set it to 1 without
817  * memory ordering.
818  *
819  * @param nr
820  *   The target bit to get and set.
821  * @param addr
822  *   The address holding the bit.
823  * @return
824  *   The original bit.
825  */
826 static inline uint64_t
827 rte_bit_relaxed_test_and_set64(unsigned int nr, volatile uint64_t *addr)
828 {
829 	RTE_ASSERT(nr < 64);
830 
831 	uint64_t mask = RTE_BIT64(nr);
832 	uint64_t val = *addr;
833 	*addr = val | mask;
834 	return val & mask;
835 }
836 
837 /**
838  * Return the original bit from a 64-bit value, then clear it to 0 without
839  * memory ordering.
840  *
841  * @param nr
842  *   The target bit to get and clear.
843  * @param addr
844  *   The address holding the bit.
845  * @return
846  *   The original bit.
847  */
848 static inline uint64_t
849 rte_bit_relaxed_test_and_clear64(unsigned int nr, volatile uint64_t *addr)
850 {
851 	RTE_ASSERT(nr < 64);
852 
853 	uint64_t mask = RTE_BIT64(nr);
854 	uint64_t val = *addr;
855 	*addr = val & (~mask);
856 	return val & mask;
857 }
858 
859 #ifdef RTE_TOOLCHAIN_MSVC
860 
861 /**
862  * Get the count of leading 0-bits in v.
863  *
864  * @param v
865  *   The value.
866  * @return
867  *   The count of leading zero bits.
868  */
869 static inline unsigned int
870 rte_clz32(uint32_t v)
871 {
872 	unsigned long rv;
873 
874 	(void)_BitScanReverse(&rv, v);
875 
876 	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
877 }
878 
879 /**
880  * Get the count of leading 0-bits in v.
881  *
882  * @param v
883  *   The value.
884  * @return
885  *   The count of leading zero bits.
886  */
887 static inline unsigned int
888 rte_clz64(uint64_t v)
889 {
890 	unsigned long rv;
891 
892 	(void)_BitScanReverse64(&rv, v);
893 
894 	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
895 }
896 
897 /**
898  * Get the count of trailing 0-bits in v.
899  *
900  * @param v
901  *   The value.
902  * @return
903  *   The count of trailing zero bits.
904  */
905 static inline unsigned int
906 rte_ctz32(uint32_t v)
907 {
908 	unsigned long rv;
909 
910 	(void)_BitScanForward(&rv, v);
911 
912 	return (unsigned int)rv;
913 }
914 
915 /**
916  * Get the count of trailing 0-bits in v.
917  *
918  * @param v
919  *   The value.
920  * @return
921  *   The count of trailing zero bits.
922  */
923 static inline unsigned int
924 rte_ctz64(uint64_t v)
925 {
926 	unsigned long rv;
927 
928 	(void)_BitScanForward64(&rv, v);
929 
930 	return (unsigned int)rv;
931 }
932 
933 /**
934  * Get the count of 1-bits in v.
935  *
936  * @param v
937  *   The value.
938  * @return
939  *   The count of 1-bits.
940  */
941 static inline unsigned int
942 rte_popcount32(uint32_t v)
943 {
944 	return (unsigned int)__popcnt(v);
945 }
946 
947 /**
948  * Get the count of 1-bits in v.
949  *
950  * @param v
951  *   The value.
952  * @return
953  *   The count of 1-bits.
954  */
955 static inline unsigned int
956 rte_popcount64(uint64_t v)
957 {
958 	return (unsigned int)__popcnt64(v);
959 }
960 
961 #else
962 
963 /**
964  * Get the count of leading 0-bits in v.
965  *
966  * @param v
967  *   The value.
968  * @return
969  *   The count of leading zero bits.
970  */
971 static inline unsigned int
972 rte_clz32(uint32_t v)
973 {
974 	return (unsigned int)__builtin_clz(v);
975 }
976 
977 /**
978  * Get the count of leading 0-bits in v.
979  *
980  * @param v
981  *   The value.
982  * @return
983  *   The count of leading zero bits.
984  */
985 static inline unsigned int
986 rte_clz64(uint64_t v)
987 {
988 	return (unsigned int)__builtin_clzll(v);
989 }
990 
991 /**
992  * Get the count of trailing 0-bits in v.
993  *
994  * @param v
995  *   The value.
996  * @return
997  *   The count of trailing zero bits.
998  */
999 static inline unsigned int
1000 rte_ctz32(uint32_t v)
1001 {
1002 	return (unsigned int)__builtin_ctz(v);
1003 }
1004 
1005 /**
1006  * Get the count of trailing 0-bits in v.
1007  *
1008  * @param v
1009  *   The value.
1010  * @return
1011  *   The count of trailing zero bits.
1012  */
1013 static inline unsigned int
1014 rte_ctz64(uint64_t v)
1015 {
1016 	return (unsigned int)__builtin_ctzll(v);
1017 }
1018 
1019 /**
1020  * Get the count of 1-bits in v.
1021  *
1022  * @param v
1023  *   The value.
1024  * @return
1025  *   The count of 1-bits.
1026  */
1027 static inline unsigned int
1028 rte_popcount32(uint32_t v)
1029 {
1030 	return (unsigned int)__builtin_popcount(v);
1031 }
1032 
1033 /**
1034  * Get the count of 1-bits in v.
1035  *
1036  * @param v
1037  *   The value.
1038  * @return
1039  *   The count of 1-bits.
1040  */
1041 static inline unsigned int
1042 rte_popcount64(uint64_t v)
1043 {
1044 	return (unsigned int)__builtin_popcountll(v);
1045 }
1046 
1047 #endif
1048 
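/*
 * Example values for the count helpers, following directly from their
 * definitions:
 *
 *	rte_clz32(0x00010000);  // 15 leading zero bits (bit 16 is set)
 *	rte_ctz32(0x00010000);  // 16 trailing zero bits
 *	rte_popcount64(0xf0f0); // 8 one-bits
 *
 * Note that both per-toolchain implementations leave the result of
 * rte_clz*() and rte_ctz*() undefined for a zero input.
 */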
1049 /**
1050  * Propagates the most significant set bit of a 32-bit input into
1051  * all less significant bit positions, constructing a value with the
1052  * same MSB as x but all 1's under it.
1053  *
1054  * @param x
1055  *    The integer whose MSBs need to be combined with its LSBs
1056  * @return
1057  *    The combined value.
1058  */
1059 static inline uint32_t
1060 rte_combine32ms1b(uint32_t x)
1061 {
1062 	x |= x >> 1;
1063 	x |= x >> 2;
1064 	x |= x >> 4;
1065 	x |= x >> 8;
1066 	x |= x >> 16;
1067 
1068 	return x;
1069 }
1070 
1071 /**
1072  * Propagates the most significant set bit of a 64-bit input into
1073  * all less significant bit positions, constructing a value with the
1074  * same MSB as v but all 1's under it.
1075  *
1076  * @param v
1077  *    The integer whose MSBs need to be combined with its LSBs
1078  * @return
1079  *    The combined value.
1080  */
1081 static inline uint64_t
1082 rte_combine64ms1b(uint64_t v)
1083 {
1084 	v |= v >> 1;
1085 	v |= v >> 2;
1086 	v |= v >> 4;
1087 	v |= v >> 8;
1088 	v |= v >> 16;
1089 	v |= v >> 32;
1090 
1091 	return v;
1092 }
1093 
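/*
 * A worked example of the bit propagation: each OR-with-shift step
 * doubles the run of ones below the most significant set bit.
 *
 *	rte_combine32ms1b(0x00001400)
 *	  // 0x00001400 -> 0x00001e00 (>>1) -> 0x00001f80 (>>2)
 *	  // -> 0x00001ff8 (>>4) -> 0x00001fff (>>8) -> 0x00001fff (>>16)
 *	  == 0x00001fff
 */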
1094 /**
1095  * Searches the input parameter for the least significant set bit
1096  * (starting from zero).
1097  * If a least significant 1 bit is found, its bit index is returned.
1098  * If the input parameter is zero, the return value is undefined.
1099  *
1100  * @param v
1101  *     input parameter, should not be zero.
1102  * @return
1103  *     least significant set bit in the input parameter.
1104  */
1105 static inline uint32_t
1106 rte_bsf32(uint32_t v)
1107 {
1108 	return (uint32_t)rte_ctz32(v);
1109 }
1110 
1111 /**
1112  * Searches the input parameter for the least significant set bit
1113  * (starting from zero). Safe version (checks for input parameter being zero).
1114  *
1115  * @warning ``pos`` must be a valid pointer. It is not checked!
1116  *
1117  * @param v
1118  *     The input parameter.
1119  * @param pos
1120  *     If ``v`` was not 0, this value will contain the position of the least
1121  *     significant set bit within the input parameter.
1122  * @return
1123  *     Returns 0 if ``v`` was 0, otherwise returns 1.
1124  */
1125 static inline int
1126 rte_bsf32_safe(uint32_t v, uint32_t *pos)
1127 {
1128 	if (v == 0)
1129 		return 0;
1130 
1131 	*pos = rte_bsf32(v);
1132 	return 1;
1133 }
1134 
1135 /**
1136  * Searches the input parameter for the least significant set bit
1137  * (starting from zero).
1138  * If a least significant 1 bit is found, its bit index is returned.
1139  * If the input parameter is zero, the return value is undefined.
1140  *
1141  * @param v
1142  *     input parameter, should not be zero.
1143  * @return
1144  *     least significant set bit in the input parameter.
1145  */
1146 static inline uint32_t
1147 rte_bsf64(uint64_t v)
1148 {
1149 	return (uint32_t)rte_ctz64(v);
1150 }
1151 
1152 /**
1153  * Searches the input parameter for the least significant set bit
1154  * (starting from zero). Safe version (checks for input parameter being zero).
1155  *
1156  * @warning ``pos`` must be a valid pointer. It is not checked!
1157  *
1158  * @param v
1159  *     The input parameter.
1160  * @param pos
1161  *     If ``v`` was not 0, this value will contain the position of the least
1162  *     significant set bit within the input parameter.
1163  * @return
1164  *     Returns 0 if ``v`` was 0, otherwise returns 1.
1165  */
1166 static inline int
1167 rte_bsf64_safe(uint64_t v, uint32_t *pos)
1168 {
1169 	if (v == 0)
1170 		return 0;
1171 
1172 	*pos = rte_bsf64(v);
1173 	return 1;
1174 }
1175 
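/*
 * A minimal usage sketch of the safe variant; unlike rte_bsf32(), it may
 * be called with a zero argument.
 *
 *	uint32_t pos;
 *
 *	if (rte_bsf32_safe(0x18, &pos)) {
 *		// pos == 3, the index of the lowest set bit of 0b11000
 *	}
 */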
1176 /**
1177  * Return the last (most-significant) bit set.
1178  *
1179  * @note The last (most significant) bit is at position 32.
1180  * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32
1181  *
1182  * @param x
1183  *     The input parameter.
1184  * @return
1185  *     The last (most-significant) bit set, or 0 if the input is 0.
1186  */
1187 static inline uint32_t
1188 rte_fls_u32(uint32_t x)
1189 {
1190 	return (x == 0) ? 0 : 32 - rte_clz32(x);
1191 }
1192 
1193 /**
1194  * Return the last (most-significant) bit set.
1195  *
1196  * @note The last (most significant) bit is at position 64.
1197  * @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1,
1198  *       rte_fls_u64(0x8000000000000000) = 64
1199  *
1200  * @param x
1201  *     The input parameter.
1202  * @return
1203  *     The last (most-significant) bit set, or 0 if the input is 0.
1204  */
1205 static inline uint32_t
1206 rte_fls_u64(uint64_t x)
1207 {
1208 	return (x == 0) ? 0 : 64 - rte_clz64(x);
1209 }
1210 
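/*
 * For any nonzero x, rte_fls_u32(x) is one more than the index of the
 * highest set bit, so for a power of two rte_fls_u32(x) == rte_bsf32(x) + 1.
 *
 *	rte_fls_u32(0x10); // 5: the highest (and only) set bit has index 4
 */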
1211 /*********** Macros to work with powers of 2 ********/
1212 
1213 /**
1214  * Macro to return 1 if n is a power of 2, 0 otherwise
1215  */
1216 #define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n)))
1217 
1218 /**
1219  * Returns true if n is a power of 2
1220  * @param n
1221  *     Number to check
1222  * @return 1 if true, 0 otherwise
1223  */
1224 static inline int
1225 rte_is_power_of_2(uint32_t n)
1226 {
1227 	return n && !(n & (n - 1));
1228 }
1229 
1230 /**
1231  * Aligns input parameter to the next power of 2
1232  *
1233  * @param x
1234  *   The integer value to align
1235  *
1236  * @return
1237  *   Input parameter aligned to the next power of 2
1238  */
1239 static inline uint32_t
1240 rte_align32pow2(uint32_t x)
1241 {
1242 	x--;
1243 	x = rte_combine32ms1b(x);
1244 
1245 	return x + 1;
1246 }
1247 
1248 /**
1249  * Aligns input parameter to the previous power of 2
1250  *
1251  * @param x
1252  *   The integer value to align
1253  *
1254  * @return
1255  *   Input parameter aligned to the previous power of 2
1256  */
1257 static inline uint32_t
1258 rte_align32prevpow2(uint32_t x)
1259 {
1260 	x = rte_combine32ms1b(x);
1261 
1262 	return x - (x >> 1);
1263 }
1264 
1265 /**
1266  * Aligns 64b input parameter to the next power of 2
1267  *
1268  * @param v
1269  *   The 64b value to align
1270  *
1271  * @return
1272  *   Input parameter aligned to the next power of 2
1273  */
1274 static inline uint64_t
1275 rte_align64pow2(uint64_t v)
1276 {
1277 	v--;
1278 	v = rte_combine64ms1b(v);
1279 
1280 	return v + 1;
1281 }
1282 
1283 /**
1284  * Aligns 64b input parameter to the previous power of 2
1285  *
1286  * @param v
1287  *   The 64b value to align
1288  *
1289  * @return
1290  *   Input parameter aligned to the previous power of 2
1291  */
1292 static inline uint64_t
1293 rte_align64prevpow2(uint64_t v)
1294 {
1295 	v = rte_combine64ms1b(v);
1296 
1297 	return v - (v >> 1);
1298 }
1299 
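/*
 * Worked examples for the alignment helpers; the values follow from the
 * rte_combine*ms1b() behavior shown above:
 *
 *	rte_align32pow2(100);              // 128
 *	rte_align32pow2(128);              // 128 (already a power of two)
 *	rte_align32prevpow2(100);          // 64
 *	rte_align64pow2((1ULL << 40) + 1); // 1ULL << 41
 */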
1300 /**
1301  * Return the rounded-up log2 of a 32-bit integer.
1302  *
1303  * @note Contrary to the logarithm mathematical operation,
1304  * rte_log2_u32(0) == 0 and not -inf.
1305  *
1306  * @param v
1307  *     The input parameter.
1308  * @return
1309  *     The rounded-up log2 of the input, or 0 if the input is 0.
1310  */
1311 static inline uint32_t
1312 rte_log2_u32(uint32_t v)
1313 {
1314 	if (v == 0)
1315 		return 0;
1316 	v = rte_align32pow2(v);
1317 	return rte_bsf32(v);
1318 }
1319 
1320 /**
1321  * Return the rounded-up log2 of a 64-bit integer.
1322  *
1323  * @note Contrary to the logarithm mathematical operation,
1324  * rte_log2_u64(0) == 0 and not -inf.
1325  *
1326  * @param v
1327  *     The input parameter.
1328  * @return
1329  *     The rounded-up log2 of the input, or 0 if the input is 0.
1330  */
1331 static inline uint32_t
1332 rte_log2_u64(uint64_t v)
1333 {
1334 	if (v == 0)
1335 		return 0;
1336 	v = rte_align64pow2(v);
1337 	/* we checked for v being 0 already, so no undefined behavior */
1338 	return rte_bsf64(v);
1339 }
1340 
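/*
 * Worked examples for the rounded-up log2:
 *
 *	rte_log2_u32(1); // 0
 *	rte_log2_u32(5); // 3 (next power of two is 8)
 *	rte_log2_u32(8); // 3
 *	rte_log2_u64(0); // 0 by definition, see the notes above
 */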
1341 #ifdef __cplusplus
1342 }
1343 
1344 /*
1345  * Since C++ doesn't support generic selection (i.e., _Generic),
1346  * function overloading is used instead. Such functions must be
1347  * defined outside 'extern "C"' to be accepted by the compiler.
1348  */
1349 
1350 #undef rte_bit_test
1351 #undef rte_bit_set
1352 #undef rte_bit_clear
1353 #undef rte_bit_assign
1354 #undef rte_bit_flip
1355 
1356 #undef rte_bit_atomic_test
1357 #undef rte_bit_atomic_set
1358 #undef rte_bit_atomic_clear
1359 #undef rte_bit_atomic_assign
1360 #undef rte_bit_atomic_flip
1361 #undef rte_bit_atomic_test_and_set
1362 #undef rte_bit_atomic_test_and_clear
1363 #undef rte_bit_atomic_test_and_assign
1364 
1365 #define __RTE_BIT_OVERLOAD_V_2(family, v, fun, qualifier, size, arg1_type, arg1_name) \
1366 static inline void \
1367 rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
1368 { \
1369 	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
1370 }
1371 
1372 #define __RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, size, arg1_type, arg1_name) \
1373 	__RTE_BIT_OVERLOAD_V_2(family,, fun, qualifier, size, arg1_type, arg1_name) \
1374 	__RTE_BIT_OVERLOAD_V_2(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name)
1375 
1376 #define __RTE_BIT_OVERLOAD_2(family, fun, qualifier, arg1_type, arg1_name) \
1377 	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 32, arg1_type, arg1_name) \
1378 	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 64, arg1_type, arg1_name)
1379 
1380 #define __RTE_BIT_OVERLOAD_V_2R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
1381 static inline ret_type \
1382 rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
1383 { \
1384 	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
1385 }
1386 
1387 #define __RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
1388 	__RTE_BIT_OVERLOAD_V_2R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
1389 	__RTE_BIT_OVERLOAD_V_2R(family, v_, fun, qualifier volatile, size, ret_type, arg1_type, \
1390 		arg1_name)
1391 
1392 #define __RTE_BIT_OVERLOAD_2R(family, fun, qualifier, ret_type, arg1_type, arg1_name) \
1393 	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name) \
1394 	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name)
1395 
1396 #define __RTE_BIT_OVERLOAD_V_3(family, v, fun, qualifier, size, arg1_type, arg1_name, \
1397 		arg2_type, arg2_name) \
1398 static inline void \
1399 rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
1400 		arg2_type arg2_name) \
1401 { \
1402 	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
1403 }
1404 
1405 #define __RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, size, arg1_type, arg1_name, \
1406 		arg2_type, arg2_name) \
1407 	__RTE_BIT_OVERLOAD_V_3(family,, fun, qualifier, size, arg1_type, arg1_name, \
1408 		arg2_type, arg2_name) \
1409 	__RTE_BIT_OVERLOAD_V_3(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
1410 		arg2_type, arg2_name)
1411 
1412 #define __RTE_BIT_OVERLOAD_3(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name) \
1413 	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 32, arg1_type, arg1_name, \
1414 		arg2_type, arg2_name) \
1415 	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 64, arg1_type, arg1_name, \
1416 		arg2_type, arg2_name)
1417 
1418 #define __RTE_BIT_OVERLOAD_V_3R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
1419 		arg2_type, arg2_name) \
1420 static inline ret_type \
1421 rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
1422 		arg2_type arg2_name) \
1423 { \
1424 	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
1425 }
1426 
1427 #define __RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
1428 		arg2_type, arg2_name) \
1429 	__RTE_BIT_OVERLOAD_V_3R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
1430 		arg2_type, arg2_name) \
1431 	__RTE_BIT_OVERLOAD_V_3R(family, v_, fun, qualifier volatile, size, ret_type, \
1432 		arg1_type, arg1_name, arg2_type, arg2_name)
1433 
1434 #define __RTE_BIT_OVERLOAD_3R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
1435 		arg2_type, arg2_name) \
1436 	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
1437 		arg2_type, arg2_name) \
1438 	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
1439 		arg2_type, arg2_name)
1440 
1441 #define __RTE_BIT_OVERLOAD_V_4(family, v, fun, qualifier, size, arg1_type, arg1_name, \
1442 		arg2_type, arg2_name, arg3_type, arg3_name) \
1443 static inline void \
1444 rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
1445 		arg2_type arg2_name, arg3_type arg3_name) \
1446 { \
1447 	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, arg3_name); \
1448 }
1449 
1450 #define __RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, size, arg1_type, arg1_name, \
1451 		arg2_type, arg2_name, arg3_type, arg3_name) \
1452 	__RTE_BIT_OVERLOAD_V_4(family,, fun, qualifier, size, arg1_type, arg1_name, \
1453 		arg2_type, arg2_name, arg3_type, arg3_name) \
1454 	__RTE_BIT_OVERLOAD_V_4(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
1455 		arg2_type, arg2_name, arg3_type, arg3_name)
1456 
1457 #define __RTE_BIT_OVERLOAD_4(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name, \
1458 		arg3_type, arg3_name) \
1459 	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 32, arg1_type, arg1_name, \
1460 		arg2_type, arg2_name, arg3_type, arg3_name) \
1461 	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 64, arg1_type, arg1_name, \
1462 		arg2_type, arg2_name, arg3_type, arg3_name)
1463 
1464 #define __RTE_BIT_OVERLOAD_V_4R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
1465 		arg2_type, arg2_name, arg3_type, arg3_name) \
1466 static inline ret_type \
1467 rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
1468 		arg2_type arg2_name, arg3_type arg3_name) \
1469 { \
1470 	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, \
1471 		arg3_name); \
1472 }
1473 
1474 #define __RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
1475 		arg2_type, arg2_name, arg3_type, arg3_name) \
1476 	__RTE_BIT_OVERLOAD_V_4R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
1477 		arg2_type, arg2_name, arg3_type, arg3_name) \
1478 	__RTE_BIT_OVERLOAD_V_4R(family, v_, fun, qualifier volatile, size, ret_type, \
1479 		arg1_type, arg1_name, arg2_type, arg2_name, arg3_type, arg3_name)
1480 
1481 #define __RTE_BIT_OVERLOAD_4R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
1482 		arg2_type, arg2_name, arg3_type, arg3_name) \
1483 	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
1484 		arg2_type, arg2_name, arg3_type, arg3_name) \
1485 	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
1486 		arg2_type, arg2_name, arg3_type, arg3_name)
1487 
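/*
 * For reference, each instantiation below generates four C++ overloads
 * per operation: 32- and 64-bit, each with and without volatile. E.g.
 * __RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr) produces,
 * among others, the equivalent of:
 *
 *	static inline bool
 *	rte_bit_test(const uint32_t *addr, unsigned int nr)
 *	{
 *		return __rte_bit_test32(addr, nr);
 *	}
 */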
1488 #ifdef ALLOW_EXPERIMENTAL_API
1489 __RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr)
1490 __RTE_BIT_OVERLOAD_2(, set,, unsigned int, nr)
1491 __RTE_BIT_OVERLOAD_2(, clear,, unsigned int, nr)
1492 __RTE_BIT_OVERLOAD_3(, assign,, unsigned int, nr, bool, value)
1493 __RTE_BIT_OVERLOAD_2(, flip,, unsigned int, nr)
1494 
1495 __RTE_BIT_OVERLOAD_3R(atomic_, test, const, bool, unsigned int, nr, int, memory_order)
1496 __RTE_BIT_OVERLOAD_3(atomic_, set,, unsigned int, nr, int, memory_order)
1497 __RTE_BIT_OVERLOAD_3(atomic_, clear,, unsigned int, nr, int, memory_order)
1498 __RTE_BIT_OVERLOAD_4(atomic_, assign,, unsigned int, nr, bool, value, int, memory_order)
1499 __RTE_BIT_OVERLOAD_3(atomic_, flip,, unsigned int, nr, int, memory_order)
1500 __RTE_BIT_OVERLOAD_3R(atomic_, test_and_set,, bool, unsigned int, nr, int, memory_order)
1501 __RTE_BIT_OVERLOAD_3R(atomic_, test_and_clear,, bool, unsigned int, nr, int, memory_order)
1502 __RTE_BIT_OVERLOAD_4R(atomic_, test_and_assign,, bool, unsigned int, nr, bool, value,
1503 	int, memory_order)
1504 #endif
1505 
1506 #endif
1507 
1508 #endif /* _RTE_BITOPS_H_ */
1509