/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <stdlib.h>
#include <string.h> /* memset, strerror */

#include <rte_lcore.h>
#include <rte_lcore_var.h>
#include <rte_cycles.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>

#include "rte_power_pmd_mgmt.h"
#include "power_common.h"

unsigned int emptypoll_max;
unsigned int pause_duration;
unsigned int scale_freq_min[RTE_MAX_LCORE];
unsigned int scale_freq_max[RTE_MAX_LCORE];

/* store some internal state */
static struct pmd_conf_data {
	/** what do we support? */
	struct rte_cpu_intrinsics intrinsics_support;
	/** pre-calculated tsc diff for 1us */
	uint64_t tsc_per_us;
	/** how many rte_pause can we fit in a microsecond? */
	uint64_t pause_per_us;
} global_data;

/**
 * Possible power management states of an ethdev port.
 */
enum pmd_mgmt_state {
	/** Device power management is disabled. */
	PMD_MGMT_DISABLED = 0,
	/** Device power management is enabled. */
	PMD_MGMT_ENABLED
};

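/* Pack port and queue IDs into one 32-bit value for cheap compare/copy */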
union queue {
	uint32_t val;
	struct {
		uint16_t portid;
		uint16_t qid;
	};
};

struct queue_list_entry {
	TAILQ_ENTRY(queue_list_entry) next;
	union queue queue;
	uint64_t n_empty_polls;
	uint64_t n_sleeps;
	const struct rte_eth_rxtx_callback *cb;
};

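/* Per-lcore power management state, kept in an lcore variable */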
struct __rte_cache_aligned pmd_core_cfg {
	TAILQ_HEAD(queue_list_head, queue_list_entry) head;
	/**< List of queues associated with this lcore */
	size_t n_queues;
	/**< How many queues are in the list? */
	volatile enum pmd_mgmt_state pwr_mgmt_state;
	/**< State of power management for this lcore */
	enum rte_power_pmd_mgmt_type cb_mode;
	/**< Callback mode for this lcore */
	uint64_t n_queues_ready_to_sleep;
	/**< Number of queues ready to enter power optimized state */
	uint64_t sleep_target;
	/**< Prevent a queue from triggering sleep multiple times */
};
static RTE_LCORE_VAR_HANDLE(struct pmd_core_cfg, lcore_cfgs);

static inline bool
queue_equal(const union queue *l, const union queue *r)
{
	return l->val == r->val;
}

static inline void
queue_copy(union queue *dst, const union queue *src)
{
	dst->val = src->val;
}

static struct queue_list_entry *
queue_list_find(const struct pmd_core_cfg *cfg, const union queue *q)
{
	struct queue_list_entry *cur;

	TAILQ_FOREACH(cur, &cfg->head, next) {
		if (queue_equal(&cur->queue, q))
			return cur;
	}
	return NULL;
}

static int
queue_list_add(struct pmd_core_cfg *cfg, const union queue *q)
{
	struct queue_list_entry *qle;

	/* is it already in the list? */
	if (queue_list_find(cfg, q) != NULL)
		return -EEXIST;

	qle = malloc(sizeof(*qle));
	if (qle == NULL)
		return -ENOMEM;
	memset(qle, 0, sizeof(*qle));

	queue_copy(&qle->queue, q);
	TAILQ_INSERT_TAIL(&cfg->head, qle, next);
	cfg->n_queues++;

	return 0;
}

static struct queue_list_entry *
queue_list_take(struct pmd_core_cfg *cfg, const union queue *q)
{
	struct queue_list_entry *found;

	found = queue_list_find(cfg, q);
	if (found == NULL)
		return NULL;

	TAILQ_REMOVE(&cfg->head, found, next);
	cfg->n_queues--;

	/* freeing is responsibility of the caller */
	return found;
}

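/* Fill pmc[] with one monitoring condition per queue polled by this lcore */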
static inline int
get_monitor_addresses(struct pmd_core_cfg *cfg,
		struct rte_power_monitor_cond *pmc, size_t len)
{
	const struct queue_list_entry *qle;
	size_t i = 0;
	int ret;

	TAILQ_FOREACH(qle, &cfg->head, next) {
		const union queue *q = &qle->queue;
		struct rte_power_monitor_cond *cur;

		/* attempted out of bounds access */
		if (i >= len) {
			POWER_LOG(ERR, "Too many queues being monitored");
			return -1;
		}

		cur = &pmc[i++];
		ret = rte_eth_get_monitor_addr(q->portid, q->qid, cur);
		if (ret < 0)
			return ret;
	}
	return 0;
}

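/*
 * Calibrate time-to-TSC conversion and, when tpause is unavailable,
 * empirically estimate how many rte_pause() calls fit in a microsecond.
 */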
static void
calc_tsc(void)
{
	const uint64_t hz = rte_get_timer_hz();
	const uint64_t tsc_per_us = hz / US_PER_S; /* 1us */

	global_data.tsc_per_us = tsc_per_us;

	/* only do this if we don't have tpause */
	if (!global_data.intrinsics_support.power_pause) {
		const uint64_t start = rte_rdtsc_precise();
		const uint32_t n_pauses = 10000;
		double us, us_per_pause;
		uint64_t end;
		unsigned int i;

		/* estimate number of rte_pause() calls per us */
		for (i = 0; i < n_pauses; i++)
			rte_pause();

		end = rte_rdtsc_precise();
		us = (end - start) / (double)tsc_per_us;
		us_per_pause = us / n_pauses;

		global_data.pause_per_us = (uint64_t)(1.0 / us_per_pause);
	}
}

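/* Reset a queue's empty-poll and sleep counters once traffic has been seen */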
static inline void
queue_reset(struct pmd_core_cfg *cfg, struct queue_list_entry *qcfg)
{
	const bool is_ready_to_sleep = qcfg->n_sleeps == cfg->sleep_target;

	/* reset empty poll counter for this queue */
	qcfg->n_empty_polls = 0;
	/* reset the queue sleep counter as well */
	qcfg->n_sleeps = 0;
	/* remove the queue from list of queues ready to sleep */
	if (is_ready_to_sleep)
		cfg->n_queues_ready_to_sleep--;
	/*
	 * no need to change the lcore sleep target counter because this lcore
	 * will reach the n_sleeps anyway, and the other cores are already
	 * counted so there's no need to do anything else.
	 */
}

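/*
 * Called on each empty poll: returns true once this queue has exceeded the
 * empty-poll threshold and has been marked as ready to sleep.
 */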
static inline bool
queue_can_sleep(struct pmd_core_cfg *cfg, struct queue_list_entry *qcfg)
{
	/* this function is called - that means we have an empty poll */
	qcfg->n_empty_polls++;

	/* if we haven't reached threshold for empty polls, we can't sleep */
	if (qcfg->n_empty_polls <= emptypoll_max)
		return false;

	/*
	 * we've reached a point where we are able to sleep, but we still need
	 * to check if this queue has already been marked for sleeping.
	 */
	if (qcfg->n_sleeps == cfg->sleep_target)
		return true;

	/* mark this queue as ready for sleep */
	qcfg->n_sleeps = cfg->sleep_target;
	cfg->n_queues_ready_to_sleep++;

	return true;
}

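/* Returns true when every queue polled by this lcore is ready to sleep */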
static inline bool
lcore_can_sleep(struct pmd_core_cfg *cfg)
{
	/* are all queues ready to sleep? */
	if (cfg->n_queues_ready_to_sleep != cfg->n_queues)
		return false;

	/* we've reached an iteration where we can sleep, reset sleep counter */
	cfg->n_queues_ready_to_sleep = 0;
	cfg->sleep_target++;
	/*
	 * we do not reset any individual queue empty poll counters, because
	 * we want to keep sleeping on every poll until we actually get traffic.
	 */

	return true;
}

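/*
 * MONITOR-mode Rx callback for lcores polling multiple queues: once all
 * queues are idle, wait on every queue's monitoring condition at once.
 */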
static uint16_t
clb_multiwait(uint16_t port_id __rte_unused, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
		uint16_t max_pkts __rte_unused, void *arg)
{
	struct queue_list_entry *queue_conf = arg;
	struct pmd_core_cfg *lcore_conf;
	const bool empty = nb_rx == 0;

	lcore_conf = RTE_LCORE_VAR(lcore_cfgs);

	if (likely(!empty))
		/* early exit */
		queue_reset(lcore_conf, queue_conf);
	else {
		struct rte_power_monitor_cond pmc[lcore_conf->n_queues];
		int ret;

		/* can this queue sleep? */
		if (!queue_can_sleep(lcore_conf, queue_conf))
			return nb_rx;

		/* can this lcore sleep? */
		if (!lcore_can_sleep(lcore_conf))
			return nb_rx;

		/* gather all monitoring conditions */
		ret = get_monitor_addresses(lcore_conf, pmc,
				lcore_conf->n_queues);
		if (ret < 0)
			return nb_rx;

		rte_power_monitor_multi(pmc, lcore_conf->n_queues, UINT64_MAX);
	}

	return nb_rx;
}

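/* MONITOR-mode Rx callback for an lcore polling a single queue */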
static uint16_t
clb_umwait(uint16_t port_id, uint16_t qidx, struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_rx, uint16_t max_pkts __rte_unused, void *arg)
{
	struct queue_list_entry *queue_conf = arg;

	/* this callback can't do more than one queue, omit multiqueue logic */
	if (unlikely(nb_rx == 0)) {
		queue_conf->n_empty_polls++;
		if (unlikely(queue_conf->n_empty_polls > emptypoll_max)) {
			struct rte_power_monitor_cond pmc;
			int ret;

			/* use monitoring condition to sleep */
			ret = rte_eth_get_monitor_addr(port_id, qidx,
					&pmc);
			if (ret == 0)
				rte_power_monitor(&pmc, UINT64_MAX);
		}
	} else
		queue_conf->n_empty_polls = 0;

	return nb_rx;
}

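/*
 * PAUSE-mode Rx callback: once all queues on this lcore are idle, sleep for
 * the configured pause duration using tpause or a calibrated rte_pause() loop.
 */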
static uint16_t
clb_pause(uint16_t port_id __rte_unused, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
		uint16_t max_pkts __rte_unused, void *arg)
{
	struct queue_list_entry *queue_conf = arg;
	struct pmd_core_cfg *lcore_conf;
	const bool empty = nb_rx == 0;
	uint32_t pause_duration = rte_power_pmd_mgmt_get_pause_duration();

	lcore_conf = RTE_LCORE_VAR(lcore_cfgs);

	if (likely(!empty))
		/* early exit */
		queue_reset(lcore_conf, queue_conf);
	else {
		/* can this queue sleep? */
		if (!queue_can_sleep(lcore_conf, queue_conf))
			return nb_rx;

		/* can this lcore sleep? */
		if (!lcore_can_sleep(lcore_conf))
			return nb_rx;

		/* sleep for the configured duration, use tpause if we have it */
		if (global_data.intrinsics_support.power_pause) {
			const uint64_t cur = rte_rdtsc();
			const uint64_t wait_tsc =
					cur + global_data.tsc_per_us * pause_duration;
			rte_power_pause(wait_tsc);
		} else {
			uint64_t i;
			for (i = 0; i < global_data.pause_per_us * pause_duration; i++)
				rte_pause();
		}
	}

	return nb_rx;
}

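/*
 * SCALE-mode Rx callback: scale the lcore frequency up as soon as traffic is
 * seen, and down once all queues on this lcore are idle.
 */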
static uint16_t
clb_scale_freq(uint16_t port_id __rte_unused, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
		uint16_t max_pkts __rte_unused, void *arg)
{
	const bool empty = nb_rx == 0;
	struct pmd_core_cfg *lcore_conf = RTE_LCORE_VAR(lcore_cfgs);
	struct queue_list_entry *queue_conf = arg;

	if (likely(!empty)) {
		/* early exit */
		queue_reset(lcore_conf, queue_conf);

		/* scale up freq immediately */
		rte_power_freq_max(rte_lcore_id());
	} else {
		/* can this queue sleep? */
		if (!queue_can_sleep(lcore_conf, queue_conf))
			return nb_rx;

		/* can this lcore sleep? */
		if (!lcore_can_sleep(lcore_conf))
			return nb_rx;

		rte_power_freq_min(rte_lcore_id());
	}

	return nb_rx;
}

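/*
 * Returns 1 if the queue is stopped, 0 if it is running, -1 on error.
 * Drivers that cannot report queue info are assumed to be stopped.
 */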
static int
queue_stopped(const uint16_t port_id, const uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	int ret = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (ret < 0) {
		if (ret == -ENOTSUP)
			return 1;
		else
			return -1;
	}

	return qinfo.queue_state == RTE_ETH_QUEUE_STATE_STOPPED;
}

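/* Check that every queue managed on this lcore is stopped */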
static int
cfg_queues_stopped(struct pmd_core_cfg *queue_cfg)
{
	const struct queue_list_entry *entry;

	TAILQ_FOREACH(entry, &queue_cfg->head, next) {
		const union queue *q = &entry->queue;
		int ret = queue_stopped(q->portid, q->qid);
		if (ret != 1)
			return ret;
	}
	return 1;
}

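/* Verify that a supported cpufreq environment can be initialized for this lcore */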
static int
check_scale(unsigned int lcore)
{
	enum power_management_env env;

	/* only PSTATE, AMD-PSTATE, ACPI and CPPC modes are supported */
	if (!rte_power_check_env_supported(PM_ENV_ACPI_CPUFREQ) &&
			!rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ) &&
			!rte_power_check_env_supported(PM_ENV_AMD_PSTATE_CPUFREQ) &&
			!rte_power_check_env_supported(PM_ENV_CPPC_CPUFREQ)) {
		POWER_LOG(DEBUG, "Only ACPI, PSTATE, AMD-PSTATE, or CPPC modes are supported");
		return -ENOTSUP;
	}
	/* ensure we could initialize the power library */
	if (rte_power_init(lcore))
		return -EINVAL;

	/* ensure we initialized the correct env */
	env = rte_power_get_env();
	if (env != PM_ENV_ACPI_CPUFREQ && env != PM_ENV_PSTATE_CPUFREQ &&
			env != PM_ENV_AMD_PSTATE_CPUFREQ && env != PM_ENV_CPPC_CPUFREQ) {
		POWER_LOG(DEBUG, "Unable to initialize ACPI, PSTATE, AMD-PSTATE, or CPPC modes");
		return -ENOTSUP;
	}

	/* we're done */
	return 0;
}

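/* Verify monitor intrinsics and PMD support before adding a MONITOR-mode queue */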
static int
check_monitor(struct pmd_core_cfg *cfg, const union queue *qdata)
{
	struct rte_power_monitor_cond dummy;
	bool multimonitor_supported;

	/* check if rte_power_monitor is supported */
	if (!global_data.intrinsics_support.power_monitor) {
		POWER_LOG(DEBUG, "Monitoring intrinsics are not supported");
		return -ENOTSUP;
	}
	/* check if multi-monitor is supported */
	multimonitor_supported =
			global_data.intrinsics_support.power_monitor_multi;

	/* if we're adding a new queue, do we support multiple queues? */
	if (cfg->n_queues > 0 && !multimonitor_supported) {
		POWER_LOG(DEBUG, "Monitoring multiple queues is not supported");
		return -ENOTSUP;
	}

	/* check if the device supports the necessary PMD API */
	if (rte_eth_get_monitor_addr(qdata->portid, qdata->qid,
			&dummy) == -ENOTSUP) {
		POWER_LOG(DEBUG, "The device does not support rte_eth_get_monitor_addr");
		return -ENOTSUP;
	}

	/* we're done */
	return 0;
}

static inline rte_rx_callback_fn
get_monitor_callback(void)
{
	return global_data.intrinsics_support.power_monitor_multi ?
		clb_multiwait : clb_umwait;
}

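/*
 * Illustrative usage sketch (not part of this file): management must be
 * enabled while the Rx queue is stopped, e.g. before starting the port.
 * Assumes a configured port_id/queue 0 and a valid polling lcore_id:
 *
 *	ret = rte_power_ethdev_pmgmt_queue_enable(lcore_id, port_id, 0,
 *			RTE_POWER_MGMT_TYPE_MONITOR);
 *	if (ret == 0)
 *		ret = rte_eth_dev_start(port_id);
 */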
int
rte_power_ethdev_pmgmt_queue_enable(unsigned int lcore_id, uint16_t port_id,
		uint16_t queue_id, enum rte_power_pmd_mgmt_type mode)
{
	const union queue qdata = {.portid = port_id, .qid = queue_id};
	struct pmd_core_cfg *lcore_cfg;
	struct queue_list_entry *queue_cfg;
	struct rte_eth_dev_info info;
	rte_rx_callback_fn clb;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (queue_id >= RTE_MAX_QUEUES_PER_PORT || lcore_id >= RTE_MAX_LCORE) {
		ret = -EINVAL;
		goto end;
	}

	if (rte_eth_dev_info_get(port_id, &info) < 0) {
		ret = -EINVAL;
		goto end;
	}

	/* check if queue id is valid */
	if (queue_id >= info.nb_rx_queues) {
		ret = -EINVAL;
		goto end;
	}

	/* check if the queue is stopped */
	ret = queue_stopped(port_id, queue_id);
	if (ret != 1) {
		/* error means invalid queue, 0 means queue wasn't stopped */
		ret = ret < 0 ? -EINVAL : -EBUSY;
		goto end;
	}

	lcore_cfg = RTE_LCORE_VAR_LCORE(lcore_id, lcore_cfgs);

	/* check if other queues are stopped as well */
	ret = cfg_queues_stopped(lcore_cfg);
	if (ret != 1) {
		/* error means invalid queue, 0 means queue wasn't stopped */
		ret = ret < 0 ? -EINVAL : -EBUSY;
		goto end;
	}

	/* if callback was already enabled, check current callback type */
	if (lcore_cfg->pwr_mgmt_state != PMD_MGMT_DISABLED &&
			lcore_cfg->cb_mode != mode) {
		ret = -EINVAL;
		goto end;
	}

	/* we need this in various places */
	rte_cpu_get_intrinsics_support(&global_data.intrinsics_support);

	switch (mode) {
	case RTE_POWER_MGMT_TYPE_MONITOR:
		/* check if we can add a new queue */
		ret = check_monitor(lcore_cfg, &qdata);
		if (ret < 0)
			goto end;

		clb = get_monitor_callback();
		break;
	case RTE_POWER_MGMT_TYPE_SCALE:
		clb = clb_scale_freq;

		/* we only have to check this when enabling first queue */
		if (lcore_cfg->pwr_mgmt_state != PMD_MGMT_DISABLED)
			break;
		/* check if we can enable frequency scaling */
		ret = check_scale(lcore_id);
		if (ret < 0)
			goto end;
		break;
	case RTE_POWER_MGMT_TYPE_PAUSE:
		/* figure out various time-to-tsc conversions */
		if (global_data.tsc_per_us == 0)
			calc_tsc();

		clb = clb_pause;
		break;
	default:
		POWER_LOG(DEBUG, "Invalid power management type");
		ret = -EINVAL;
		goto end;
	}
	/* add this queue to the list */
	ret = queue_list_add(lcore_cfg, &qdata);
	if (ret < 0) {
		POWER_LOG(DEBUG, "Failed to add queue to list: %s",
				strerror(-ret));
		goto end;
	}
	/* new queue is always added last */
	queue_cfg = TAILQ_LAST(&lcore_cfg->head, queue_list_head);

	/* when enabling first queue, ensure sleep target is not 0 */
	if (lcore_cfg->n_queues == 1 && lcore_cfg->sleep_target == 0)
		lcore_cfg->sleep_target = 1;

	/* initialize data before enabling the callback */
	if (lcore_cfg->n_queues == 1) {
		lcore_cfg->cb_mode = mode;
		lcore_cfg->pwr_mgmt_state = PMD_MGMT_ENABLED;
	}
	queue_cfg->cb = rte_eth_add_rx_callback(port_id, queue_id,
			clb, queue_cfg);

	ret = 0;
end:
	return ret;
}

int
rte_power_ethdev_pmgmt_queue_disable(unsigned int lcore_id,
		uint16_t port_id, uint16_t queue_id)
{
	const union queue qdata = {.portid = port_id, .qid = queue_id};
	struct pmd_core_cfg *lcore_cfg;
	struct queue_list_entry *queue_cfg;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (lcore_id >= RTE_MAX_LCORE || queue_id >= RTE_MAX_QUEUES_PER_PORT)
		return -EINVAL;

	/* check if the queue is stopped */
	ret = queue_stopped(port_id, queue_id);
	if (ret != 1) {
		/* error means invalid queue, 0 means queue wasn't stopped */
		return ret < 0 ? -EINVAL : -EBUSY;
	}

	/* no need to check queue id as wrong queue id would not be enabled */
	lcore_cfg = RTE_LCORE_VAR_LCORE(lcore_id, lcore_cfgs);

	/* check if other queues are stopped as well */
	ret = cfg_queues_stopped(lcore_cfg);
	if (ret != 1) {
		/* error means invalid queue, 0 means queue wasn't stopped */
		return ret < 0 ? -EINVAL : -EBUSY;
	}

	if (lcore_cfg->pwr_mgmt_state != PMD_MGMT_ENABLED)
		return -EINVAL;

	/*
	 * There is no good/easy way to do this without race conditions, so we
	 * are just going to throw our hands in the air and hope that the user
	 * has read the documentation and has ensured that ports are stopped at
	 * the time we enter the API functions.
	 */
	queue_cfg = queue_list_take(lcore_cfg, &qdata);
	if (queue_cfg == NULL)
		return -ENOENT;

	/* if we've removed all queues from the lists, set state to disabled */
	if (lcore_cfg->n_queues == 0)
		lcore_cfg->pwr_mgmt_state = PMD_MGMT_DISABLED;

	switch (lcore_cfg->cb_mode) {
	case RTE_POWER_MGMT_TYPE_MONITOR: /* fall-through */
	case RTE_POWER_MGMT_TYPE_PAUSE:
		rte_eth_remove_rx_callback(port_id, queue_id, queue_cfg->cb);
		break;
	case RTE_POWER_MGMT_TYPE_SCALE:
		rte_eth_remove_rx_callback(port_id, queue_id, queue_cfg->cb);
		/* disable power library on this lcore if this was last queue */
		if (lcore_cfg->pwr_mgmt_state == PMD_MGMT_DISABLED) {
			rte_power_freq_max(lcore_id);
			rte_power_exit(lcore_id);
		}
		break;
	}
	/*
	 * the API doc mandates that the user stops all processing on affected
	 * ports before calling any of these API's, so we can assume that the
	 * callbacks can be freed. we're intentionally casting away const-ness.
	 */
	rte_free((void *)(uintptr_t)queue_cfg->cb);
	free(queue_cfg);

	return 0;
}

void
rte_power_pmd_mgmt_set_emptypoll_max(unsigned int max)
{
	emptypoll_max = max;
}

unsigned int
rte_power_pmd_mgmt_get_emptypoll_max(void)
{
	return emptypoll_max;
}

int
rte_power_pmd_mgmt_set_pause_duration(unsigned int duration)
{
	if (duration == 0) {
		POWER_LOG(ERR, "Pause duration must be greater than 0, value unchanged");
		return -EINVAL;
	}
	pause_duration = duration;

	return 0;
}

unsigned int
rte_power_pmd_mgmt_get_pause_duration(void)
{
	return pause_duration;
}

int
rte_power_pmd_mgmt_set_scaling_freq_min(unsigned int lcore, unsigned int min)
{
	if (lcore >= RTE_MAX_LCORE) {
		POWER_LOG(ERR, "Invalid lcore ID: %u", lcore);
		return -EINVAL;
	}

	if (min > scale_freq_max[lcore]) {
		POWER_LOG(ERR, "Invalid min frequency: Cannot be greater than max frequency");
		return -EINVAL;
	}
	scale_freq_min[lcore] = min;

	return 0;
}

int
rte_power_pmd_mgmt_set_scaling_freq_max(unsigned int lcore, unsigned int max)
{
	if (lcore >= RTE_MAX_LCORE) {
		POWER_LOG(ERR, "Invalid lcore ID: %u", lcore);
		return -EINVAL;
	}

	/* Zero means 'not set'. Use UINT32_MAX to enable RTE_MIN/MAX macro use when scaling. */
	if (max == 0)
		max = UINT32_MAX;
	if (max < scale_freq_min[lcore]) {
		POWER_LOG(ERR, "Invalid max frequency: Cannot be less than min frequency");
		return -EINVAL;
	}

	scale_freq_max[lcore] = max;

	return 0;
}

int
rte_power_pmd_mgmt_get_scaling_freq_min(unsigned int lcore)
{
	if (lcore >= RTE_MAX_LCORE) {
		POWER_LOG(ERR, "Invalid lcore ID: %u", lcore);
		return -EINVAL;
	}

	if (scale_freq_min[lcore] == 0)
		POWER_LOG(DEBUG, "Scaling freq min config not set. Using sysfs min freq.");

	return scale_freq_min[lcore];
}

int
rte_power_pmd_mgmt_get_scaling_freq_max(unsigned int lcore)
{
	if (lcore >= RTE_MAX_LCORE) {
		POWER_LOG(ERR, "Invalid lcore ID: %u", lcore);
		return -EINVAL;
	}

	if (scale_freq_max[lcore] == UINT32_MAX) {
		POWER_LOG(DEBUG, "Scaling freq max config not set. Using sysfs max freq.");
		return 0;
	}

	return scale_freq_max[lcore];
}

RTE_INIT(rte_power_ethdev_pmgmt_init) {
	unsigned int lcore_id;
	struct pmd_core_cfg *lcore_cfg;
	int i;

	RTE_LCORE_VAR_ALLOC(lcore_cfgs);

	/* initialize all tailqs */
	RTE_LCORE_VAR_FOREACH(lcore_id, lcore_cfg, lcore_cfgs)
		TAILQ_INIT(&lcore_cfg->head);

	/* initialize config defaults */
	emptypoll_max = 512;
	pause_duration = 1;
	/* scaling defaults out of range to ensure not used unless set by user or app */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		scale_freq_min[i] = 0;
		scale_freq_max[i] = UINT32_MAX;
	}
}