xref: /dpdk/lib/eal/x86/rte_power_intrinsics.c (revision 18b5049ab4fecda6ad303606cc265d923b56da14)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2020 Intel Corporation
399a2dd95SBruce Richardson  */
499a2dd95SBruce Richardson 
5e9fd1ebfSTyler Retzlaff #include <stdalign.h>
6e9fd1ebfSTyler Retzlaff 
799a2dd95SBruce Richardson #include <rte_common.h>
899a2dd95SBruce Richardson #include <rte_lcore.h>
9*18b5049aSMattias Rönnblom #include <rte_lcore_var.h>
1066834f29SAnatoly Burakov #include <rte_rtm.h>
1199a2dd95SBruce Richardson #include <rte_spinlock.h>
1299a2dd95SBruce Richardson 
1399a2dd95SBruce Richardson #include "rte_power_intrinsics.h"
1499a2dd95SBruce Richardson 
/*
 * Per-lcore structure holding current status of C0.2 sleeps.
 *
 * The lock serializes updates to monitor_addr against wakeup requests from
 * other lcores (see rte_power_monitor_wakeup() for the full race analysis).
 */
struct power_wait_status {
	rte_spinlock_t lock;
	volatile void *monitor_addr; /**< NULL if not currently sleeping */
};

/* per-lcore instance of the wait status, accessed via RTE_LCORE_VAR_LCORE() */
RTE_LCORE_VAR_HANDLE(struct power_wait_status, wait_status);

/* NOTE(review): presumably allocates the per-lcore storage at load time via
 * a constructor — confirm against rte_lcore_var.h semantics.
 */
RTE_LCORE_VAR_INIT(wait_status);
2699a2dd95SBruce Richardson 
/*
 * This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.
 * For more information about usage of these instructions, please refer to
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual.
 *
 * Arms the address-monitoring hardware on @addr; a later UMWAIT on this
 * lcore will sleep until a write to the monitored cache line (or timeout).
 */
static void intel_umonitor(volatile void *addr)
{
#if defined(RTE_TOOLCHAIN_MSVC) || defined(__WAITPKG__)
	/* cast away "volatile" when using the intrinsic */
	_umonitor((void *)(uintptr_t)addr);
#else
	/*
	 * we're using raw byte codes for compiler versions which
	 * don't support this instruction natively.
	 * 0xf3 0x0f 0xae 0xf7 encodes UMONITOR with the address in (e/r)di,
	 * which is where the "D" constraint below places @addr.
	 */
	asm volatile(".byte 0xf3, 0x0f, 0xae, 0xf7;"
			:
			: "D"(addr));
#endif
}
47c7ed1ce0SSivaprasad Tummala 
/*
 * Wait (in C0.2) until a write to the previously UMONITOR'ed address or
 * until the TSC reaches @timeout, whichever comes first.
 */
static void intel_umwait(const uint64_t timeout)
{
#if defined(RTE_TOOLCHAIN_MSVC) || defined(__WAITPKG__)
	/* state 0 requests the deeper C0.2 sleep; timeout is a TSC deadline */
	_umwait(0, timeout);
#else
	/* UMWAIT takes the 64-bit TSC deadline split across edx:eax */
	const uint32_t tsc_l = (uint32_t)timeout;
	const uint32_t tsc_h = (uint32_t)(timeout >> 32);

	/* raw encoding of UMWAIT for toolchains without WAITPKG support */
	asm volatile(".byte 0xf2, 0x0f, 0xae, 0xf7;"
			: /* ignore rflags */
			: "D"(0), /* enter C0.2 */
			  "a"(tsc_l), "d"(tsc_h));
#endif
}
62c7ed1ce0SSivaprasad Tummala 
/*
 * This function uses MONITORX/MWAITX instructions and will enter C1 state.
 * For more information about usage of these instructions, please refer to
 * AMD64 Architecture Programmer's Manual.
 *
 * Arms the address-monitoring hardware on @addr; a later MWAITX on this
 * lcore will sleep until a write to the monitored cache line.
 */
static void amd_monitorx(volatile void *addr)
{
#if defined(RTE_TOOLCHAIN_MSVC) || defined(__MWAITX__)
	/* cast away "volatile" when using the intrinsic */
	_mm_monitorx((void *)(uintptr_t)addr, 0, 0);
#else
	/*
	 * raw encoding of MONITORX for toolchains without MWAITX support;
	 * the address goes in (e/r)ax per the "a" constraint below.
	 */
	asm volatile(".byte 0x0f, 0x01, 0xfa;"
			:
			: "a"(addr),
			"c"(0),  /* no extensions */
			"d"(0)); /* no hints */
#endif
}
81c7ed1ce0SSivaprasad Tummala 
/*
 * Sleep (in C1) until a write to the previously MONITORX'ed address.
 *
 * @timeout is deliberately ignored: this implementation programs MWAITX
 * with no time-out, so only a store to the monitored line (or an
 * architectural wake event) ends the wait.
 */
static void amd_mwaitx(const uint64_t timeout)
{
	RTE_SET_USED(timeout);
#if defined(RTE_TOOLCHAIN_MSVC) || defined(__MWAITX__)
	_mm_mwaitx(0, 0, 0);
#else
	/* raw encoding of MWAITX for toolchains without MWAITX support */
	asm volatile(".byte 0x0f, 0x01, 0xfb;"
			: /* ignore rflags */
			: "a"(0), /* enter C1 */
			"c"(0)); /* no time-out */
#endif
}
94c7ed1ce0SSivaprasad Tummala 
/*
 * Monitor/wait dispatch table, filled in at init time based on whether the
 * CPU provides MONITORX (AMD) or the UMONITOR/UMWAIT fallback is used.
 * Cache-line aligned so the hot-path loads don't share a line with
 * unrelated writable data.
 */
static alignas(RTE_CACHE_LINE_SIZE) struct {
	void (*mmonitor)(volatile void *addr); /**< arm monitor on an address */
	void (*mwait)(const uint64_t timeout); /**< sleep until write/timeout */
} power_monitor_ops;
99c7ed1ce0SSivaprasad Tummala 
/*
 * Wake a sleeping lcore by touching the address it is monitoring.
 *
 * Performs a value-preserving compare-exchange on @addr: the store is enough
 * to trip the hardware monitor, while the data itself is left unchanged.
 * Caller must hold the target lcore's wait-status lock so @addr stays valid
 * (see rte_power_monitor_wakeup()).
 */
static inline void
__umwait_wakeup(volatile void *addr)
{
	uint64_t val;

	/* trigger a write but don't change the value */
	val = rte_atomic_load_explicit((volatile __rte_atomic uint64_t *)addr,
			rte_memory_order_relaxed);
	rte_atomic_compare_exchange_strong_explicit((volatile __rte_atomic uint64_t *)addr,
			&val, val, rte_memory_order_relaxed, rte_memory_order_relaxed);
}
11199a2dd95SBruce Richardson 
/* CPU capability flags, probed once at init (see rte_power_intrinsics_init) */
static bool wait_supported;       /* UMWAIT + TPAUSE both available */
static bool wait_multi_supported; /* RTM-based multi-address monitoring */
static bool monitor_supported;    /* single-address monitor available */
11599a2dd95SBruce Richardson 
/*
 * Read the current value at @p as an unsigned integer of width @sz bytes
 * (1, 2, 4 or 8), zero-extended to 64 bits. Any other size is a caller bug:
 * it trips RTE_ASSERT in debug builds and yields 0 otherwise.
 */
static inline uint64_t
__get_umwait_val(const volatile void *p, const uint8_t sz)
{
	uint64_t val;

	if (sz == sizeof(uint8_t))
		val = *(const volatile uint8_t *)p;
	else if (sz == sizeof(uint16_t))
		val = *(const volatile uint16_t *)p;
	else if (sz == sizeof(uint32_t))
		val = *(const volatile uint32_t *)p;
	else if (sz == sizeof(uint64_t))
		val = *(const volatile uint64_t *)p;
	else {
		/* shouldn't happen */
		RTE_ASSERT(0);
		val = 0;
	}

	return val;
}
13499a2dd95SBruce Richardson 
/*
 * Validate a monitor-condition operand width.
 *
 * @return 0 if @sz is one of the widths the monitor hardware can compare
 * (1, 2, 4 or 8 bytes), -1 otherwise.
 */
static inline int
__check_val_size(const uint8_t sz)
{
	const bool ok = sz == sizeof(uint8_t) || sz == sizeof(uint16_t) ||
			sz == sizeof(uint32_t) || sz == sizeof(uint64_t);

	return ok ? 0 : -1;
}
14999a2dd95SBruce Richardson 
/**
 * This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.
 * For more information about usage of these instructions, please refer to
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual.
 *
 * @param pmc
 *   Condition to monitor: address, operand size, and an abort callback.
 * @param tsc_timestamp
 *   TSC deadline passed to the wait instruction.
 * @return
 *   0 on success (woken or aborted), -ENOTSUP if monitoring is unsupported,
 *   -EINVAL on bad lcore, NULL @pmc, bad size, or missing callback.
 */
int
rte_power_monitor(const struct rte_power_monitor_cond *pmc,
		const uint64_t tsc_timestamp)
{
	const unsigned int lcore_id = rte_lcore_id();
	struct power_wait_status *s;
	uint64_t cur_value;

	/* prevent user from running this instruction if it's not supported */
	if (!monitor_supported)
		return -ENOTSUP;

	/* prevent non-EAL thread from using this API */
	if (lcore_id >= RTE_MAX_LCORE)
		return -EINVAL;

	if (pmc == NULL)
		return -EINVAL;

	if (__check_val_size(pmc->size) < 0)
		return -EINVAL;

	if (pmc->fn == NULL)
		return -EINVAL;

	s = RTE_LCORE_VAR_LCORE(lcore_id, wait_status);

	/* update sleep address */
	rte_spinlock_lock(&s->lock);
	s->monitor_addr = pmc->addr;

	/*
	 * set address for memory monitor while still holding the lock, so a
	 * concurrent wakeup either sees NULL (nothing to do) or an address
	 * that the monitor hardware is already armed on.
	 */
	power_monitor_ops.mmonitor(pmc->addr);

	/* now that we've put this address into monitor, we can unlock */
	rte_spinlock_unlock(&s->lock);

	/* sample the value AFTER arming: a racing write is caught either by
	 * the callback below or by the armed monitor itself */
	cur_value = __get_umwait_val(pmc->addr, pmc->size);

	/* check if callback indicates we should abort */
	if (pmc->fn(cur_value, pmc->opaque) != 0)
		goto end;

	/* execute mwait */
	power_monitor_ops.mwait(tsc_timestamp);

end:
	/* erase sleep address */
	rte_spinlock_lock(&s->lock);
	s->monitor_addr = NULL;
	rte_spinlock_unlock(&s->lock);

	return 0;
}
20999a2dd95SBruce Richardson 
/**
 * This function uses TPAUSE instruction  and will enter C0.2 state. For more
 * information about usage of this instruction, please refer to Intel(R) 64 and
 * IA-32 Architectures Software Developer's Manual.
 *
 * @param tsc_timestamp
 *   TSC deadline at which the pause ends at the latest.
 * @return
 *   0 on success, -ENOTSUP if TPAUSE/UMWAIT are not supported by the CPU.
 */
int
rte_power_pause(const uint64_t tsc_timestamp)
{
	/* prevent user from running this instruction if it's not supported */
	if (!wait_supported)
		return -ENOTSUP;

	/* execute TPAUSE */
#if defined(RTE_TOOLCHAIN_MSVC) || defined(__WAITPKG__)
	/* state 0 requests the deeper C0.2 sleep */
	_tpause(0, tsc_timestamp);
#else
	/* TPAUSE takes the 64-bit TSC deadline split across edx:eax */
	const uint32_t tsc_l = (uint32_t)tsc_timestamp;
	const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);

	/* raw encoding of TPAUSE for toolchains without WAITPKG support */
	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf7;"
			: /* ignore rflags */
			: "D"(0), /* enter C0.2 */
			"a"(tsc_l), "d"(tsc_h));
#endif

	return 0;
}
23799a2dd95SBruce Richardson 
/*
 * Constructor: probe CPU support for the power intrinsics and select the
 * AMD (MONITORX/MWAITX) or Intel (UMONITOR/UMWAIT) backend accordingly.
 */
RTE_INIT(rte_power_intrinsics_init) {
	struct rte_cpu_intrinsics i;

	rte_cpu_get_intrinsics_support(&i);

	/* rte_power_pause() needs both monitor and pause support */
	if (i.power_monitor && i.power_pause)
		wait_supported = 1;
	if (i.power_monitor_multi)
		wait_multi_supported = 1;
	if (i.power_monitor)
		monitor_supported = 1;

	/* MONITORX present means AMD-style monitoring; otherwise fall back
	 * to the Intel WAITPKG instructions */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MONITORX)) {
		power_monitor_ops.mmonitor = &amd_monitorx;
		power_monitor_ops.mwait = &amd_mwaitx;
	} else {
		power_monitor_ops.mmonitor = &intel_umonitor;
		power_monitor_ops.mwait = &intel_umwait;
	}
}
25899a2dd95SBruce Richardson 
/**
 * Wake up lcore @lcore_id if it is sleeping in rte_power_monitor().
 *
 * @return
 *   0 on success, -ENOTSUP if monitoring is unsupported, -EINVAL if
 *   @lcore_id is out of range.
 */
int
rte_power_monitor_wakeup(const unsigned int lcore_id)
{
	struct power_wait_status *s;

	/* prevent user from running this instruction if it's not supported */
	if (!monitor_supported)
		return -ENOTSUP;

	/* prevent buffer overrun */
	if (lcore_id >= RTE_MAX_LCORE)
		return -EINVAL;

	s = RTE_LCORE_VAR_LCORE(lcore_id, wait_status);

	/*
	 * There is a race condition between sleep, wakeup and locking, but we
	 * don't need to handle it.
	 *
	 * Possible situations:
	 *
	 * 1. T1 locks, sets address, unlocks
	 * 2. T2 locks, triggers wakeup, unlocks
	 * 3. T1 sleeps
	 *
	 * In this case, because T1 has already set the address for monitoring,
	 * we will wake up immediately even if T2 triggers wakeup before T1
	 * goes to sleep.
	 *
	 * 1. T1 locks, sets address, unlocks, goes to sleep, and wakes up
	 * 2. T2 locks, triggers wakeup, and unlocks
	 * 3. T1 locks, erases address, and unlocks
	 *
	 * In this case, since we've already woken up, the "wakeup" was
	 * unneeded, and since T1 is still waiting on T2 releasing the lock, the
	 * wakeup address is still valid so it's perfectly safe to write it.
	 *
	 * For multi-monitor case, the act of locking will in itself trigger the
	 * wakeup, so no additional writes necessary.
	 */
	rte_spinlock_lock(&s->lock);
	/* a NULL address means the target lcore is not sleeping */
	if (s->monitor_addr != NULL)
		__umwait_wakeup(s->monitor_addr);
	rte_spinlock_unlock(&s->lock);

	return 0;
}
30666834f29SAnatoly Burakov 
/**
 * Monitor several addresses at once using an RTM transaction: every address
 * read inside the transaction joins the read-set, so any external write to
 * one of them aborts the transaction and ends the sleep.
 *
 * @param pmc
 *   Array of @num monitor conditions; each must have a non-NULL callback.
 * @param num
 *   Number of entries in @pmc, must be non-zero.
 * @param tsc_timestamp
 *   TSC deadline for the rte_power_pause() sleep inside the transaction.
 * @return
 *   0 on success (woken, aborted, or already in a transaction),
 *   -ENOTSUP if multi-monitor is unsupported, -EINVAL on bad arguments.
 */
int
rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
		const uint32_t num, const uint64_t tsc_timestamp)
{
	struct power_wait_status *s = RTE_LCORE_VAR(wait_status);
	uint32_t i, rc;

	/* check if supported */
	if (!wait_multi_supported)
		return -ENOTSUP;

	if (pmc == NULL || num == 0)
		return -EINVAL;

	/* we are already inside transaction region, return */
	if (rte_xtest() != 0)
		return 0;

	/* start new transaction region */
	rc = rte_xbegin();

	/* transaction abort, possible write to one of wait addresses */
	if (rc != RTE_XBEGIN_STARTED)
		return 0;

	/*
	 * the mere act of reading the lock status here adds the lock to
	 * the read set. This means that when we trigger a wakeup from another
	 * thread, even if we don't have a defined wakeup address and thus don't
	 * actually cause any writes, the act of locking our lock will itself
	 * trigger the wakeup and abort the transaction.
	 */
	rte_spinlock_is_locked(&s->lock);

	/*
	 * add all addresses to wait on into transaction read-set and check if
	 * any of wakeup conditions are already met.
	 */
	rc = 0;
	for (i = 0; i < num; i++) {
		const struct rte_power_monitor_cond *c = &pmc[i];

		/* cannot be NULL */
		if (c->fn == NULL) {
			rc = -EINVAL;
			break;
		}

		/* reading the value adds c->addr to the transaction read-set */
		const uint64_t val = __get_umwait_val(c->addr, c->size);

		/* abort if callback indicates that we need to stop */
		if (c->fn(val, c->opaque) != 0)
			break;
	}

	/* none of the conditions were met, sleep until timeout */
	if (i == num)
		rte_power_pause(tsc_timestamp);

	/* end transaction region */
	rte_xend();

	return rc;
}
371