/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2022 Mellanox Technologies, Ltd
 */

#ifndef _MLX5_HWS_CNT_H_
#define _MLX5_HWS_CNT_H_

#include <rte_ring.h>
#include "mlx5_utils.h"
#include "mlx5_flow.h"

/*
 * HWS COUNTER ID's layout
 *       3                   2                   1                   0
 *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |  T  |     | D |                                               |
 *    ~  Y  |     | C |                    IDX                        ~
 *    |  P  |     | S |                                               |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
 *    Bit 25:24 = DCS index
 *    Bit 23:00 = IDX within the DCS bulk this counter belongs to.
 */

#define MLX5_HWS_CNT_DCS_IDX_OFFSET 24
#define MLX5_HWS_CNT_DCS_IDX_MASK 0x3
#define MLX5_HWS_CNT_IDX_MASK ((1UL << MLX5_HWS_CNT_DCS_IDX_OFFSET) - 1)

#define MLX5_HWS_AGE_IDX_MASK (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)
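
/*
 * A worked example of the layout above (the concrete values below are made up
 * purely for illustration):
 *
 *   cnt_id = (MLX5_INDIRECT_ACTION_TYPE_COUNT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) |
 *            (1u << MLX5_HWS_CNT_DCS_IDX_OFFSET) | 0x000123
 *
 * carries the TYPE in bits 31:29, DCS index 1 in bits 25:24 and offset 0x123
 * in bits 23:00. mlx5_hws_cnt_iidx() below recovers dcs[1].iidx + 0x123 from
 * it, and mlx5_hws_cnt_id_gen() performs the reverse mapping.
 */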

struct mlx5_hws_cnt_dcs {
	void *dr_action;
	uint32_t batch_sz;
	uint32_t iidx; /* internal index of first counter in this bulk. */
	struct mlx5_devx_obj *obj;
};

struct mlx5_hws_cnt_dcs_mng {
	uint32_t batch_total;
	struct mlx5_hws_cnt_dcs dcs[MLX5_HWS_CNT_DCS_NUM];
};

struct mlx5_hws_cnt {
	struct flow_counter_stats reset;
	bool in_used; /* Indicator whether this counter is in use or in the pool. */
	union {
		struct {
			uint32_t share:1;
			/*
			 * share is set to 1 when this counter is used as an
			 * indirect action.
			 */
			uint32_t age_idx:24;
			/*
			 * When this counter is used for aging, this field saves
			 * the index of the AGE parameter. For a pure counter
			 * (without aging) this index is zero.
			 */
		};
		/* This struct is only meaningful while the user owns this counter. */
		uint32_t query_gen_when_free;
		/*
		 * When the PMD owns this counter (i.e. the user has put the
		 * counter back into the PMD counter pool), this field records
		 * the value of the counter pool's query generation at the time
		 * the user released the counter.
		 */
	};
};

struct mlx5_hws_cnt_raw_data_mng {
	struct flow_counter_stats *raw;
	struct mlx5_pmd_mr mr;
};

struct mlx5_hws_cache_param {
	uint32_t size;
	uint32_t q_num;
	uint32_t fetch_sz;
	uint32_t threshold;
	uint32_t preload_sz;
};

struct mlx5_hws_cnt_pool_cfg {
	char *name;
	uint32_t request_num;
	uint32_t alloc_factor;
	struct mlx5_hws_cnt_pool *host_cpool;
};

struct mlx5_hws_cnt_pool_caches {
	uint32_t fetch_sz;
	uint32_t threshold;
	uint32_t preload_sz;
	uint32_t q_num;
	struct rte_ring *qcache[];
};

struct __rte_cache_aligned mlx5_hws_cnt_pool {
	LIST_ENTRY(mlx5_hws_cnt_pool) next;
	alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
	alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
	alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen;
	struct mlx5_hws_cnt *pool;
	struct mlx5_hws_cnt_raw_data_mng *raw_mng;
	struct rte_ring *reuse_list;
	struct rte_ring *free_list;
	struct rte_ring *wait_reset_list;
	struct mlx5_hws_cnt_pool_caches *cache;
	uint64_t time_of_last_age_check;
	struct mlx5_priv *priv;
};

/* HWS AGE status. */
enum {
	HWS_AGE_FREE, /* Initialized state. */
	HWS_AGE_CANDIDATE, /* AGE assigned to flows. */
	HWS_AGE_CANDIDATE_INSIDE_RING,
	/*
	 * AGE assigned to flows but still inside the ring. It was aged-out,
	 * but the timeout was changed, so it stays in the ring while remaining
	 * a candidate.
	 */
	HWS_AGE_AGED_OUT_REPORTED,
	/*
	 * Aged-out, reported by rte_flow_get_q_aged_flows, waiting for destroy.
	 */
	HWS_AGE_AGED_OUT_NOT_REPORTED,
	/*
	 * Aged-out, inside the aged-out ring,
	 * waiting for rte_flow_get_q_aged_flows and destroy.
	 */
};

/* HWS counter age parameter. */
struct __rte_cache_aligned __rte_packed_begin mlx5_hws_age_param {
	RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
	RTE_ATOMIC(uint32_t) sec_since_last_hit;
	/* Time in seconds since last hit (atomically accessed). */
	RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
	uint64_t accumulator_last_hits;
	/* Last total value of hits, used for comparison. */
	uint64_t accumulator_hits;
	/* Accumulator for hits coming from several counters. */
	uint32_t accumulator_cnt;
	/* Number of counters which already updated the accumulator in this second. */
	uint32_t nb_cnts; /* Number of counters used by this AGE. */
	uint32_t queue_id; /* Queue id of the counter. */
	cnt_id_t own_cnt_index;
	/* Counter action created specifically for this AGE action. */
	void *context; /* Flow AGE context. */
} __rte_packed_end;

/**
 * Return the counter pool that should actually be used in cross vHCA sharing
 * mode.
 *
 * @param cpool
 *   A pointer to the counter pool.
 * @return
 *   A pointer to the host counter pool if one is configured, otherwise
 *   @p cpool itself.
 */
static __rte_always_inline struct mlx5_hws_cnt_pool *
mlx5_hws_cnt_host_pool(struct mlx5_hws_cnt_pool *cpool)
{
	return cpool->cfg.host_cpool ? cpool->cfg.host_cpool : cpool;
}

/**
 * Translate counter id into internal index (starting from 0), which can be
 * used as an index into the raw/cnt pool.
 *
 * @param cpool
 *   A pointer to the counter pool.
 * @param cnt_id
 *   The external counter id.
 * @return
 *   Internal index.
 */
static __rte_always_inline uint32_t
mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
	uint32_t offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;

	dcs_idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
	return (hpool->dcs_mng.dcs[dcs_idx].iidx + offset);
}

/**
 * Check if it's a valid counter id.
 */
static __rte_always_inline bool
mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
{
	return (cnt_id >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
		MLX5_INDIRECT_ACTION_TYPE_COUNT ? true : false;
}

/**
 * Generate a counter id from an internal index.
 *
 * @param cpool
 *   A pointer to the counter pool.
 * @param iidx
 *   The internal counter index.
 *
 * @return
 *   Counter id.
 */
static __rte_always_inline cnt_id_t
mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	struct mlx5_hws_cnt_dcs_mng *dcs_mng = &hpool->dcs_mng;
	uint32_t idx;
	uint32_t offset;
	cnt_id_t cnt_id;

	for (idx = 0, offset = iidx; idx < dcs_mng->batch_total; idx++) {
		if (dcs_mng->dcs[idx].batch_sz <= offset)
			offset -= dcs_mng->dcs[idx].batch_sz;
		else
			break;
	}
	cnt_id = offset;
	cnt_id |= (idx << MLX5_HWS_CNT_DCS_IDX_OFFSET);
	return (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
			MLX5_INDIRECT_ACTION_TYPE_OFFSET) | cnt_id;
}

static __rte_always_inline void
__hws_cnt_query_raw(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
		uint64_t *raw_pkts, uint64_t *raw_bytes)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	struct mlx5_hws_cnt_raw_data_mng *raw_mng = hpool->raw_mng;
	struct flow_counter_stats s[2];
	uint8_t i = 0x1;
	size_t stat_sz = sizeof(s[0]);
	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);

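	/*
	 * The raw statistics area is refreshed asynchronously by the counter
	 * query service, so read the entry repeatedly until two consecutive
	 * snapshots match; this guarantees a torn (half-updated) hits/bytes
	 * pair is never returned.
	 */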
	memcpy(&s[0], &raw_mng->raw[iidx], stat_sz);
	do {
		memcpy(&s[i & 1], &raw_mng->raw[iidx], stat_sz);
		if (memcmp(&s[0], &s[1], stat_sz) == 0) {
			*raw_pkts = rte_be_to_cpu_64(s[0].hits);
			*raw_bytes = rte_be_to_cpu_64(s[0].bytes);
			break;
		}
		i = ~i;
	} while (1);
}

/**
 * Copy elements from one zero-copy ring to another zero-copy ring in place.
 *
 * The input is an rte_ring zero-copy data structure, which has two pointers;
 * when a wrap-around happens, ptr2 becomes meaningful.
 *
 * So this routine needs to consider the situation that the addresses given by
 * source and destination could both be wrapped.
 * First, calculate the number of elements to be copied until the first wrapped
 * address, which could be in the source or the destination.
 * Second, copy the remaining elements until the second wrapped address. If in
 * the first step the wrapped address was in the source, then this time it must
 * be in the destination, and vice-versa.
 * Third, copy all the remaining elements.
 *
 * In the worst case, we need to copy three pieces of contiguous memory.
 *
 * @param zcdd
 *   A pointer to zero-copy data of the destination ring.
 * @param zcds
 *   A pointer to zero-copy data of the source ring.
 * @param n
 *   Number of elements to copy.
 */
static __rte_always_inline void
__hws_cnt_r2rcpy(struct rte_ring_zc_data *zcdd, struct rte_ring_zc_data *zcds,
		 unsigned int n)
{
	unsigned int n1, n2, n3;
	void *s1, *s2, *s3;
	void *d1, *d2, *d3;

	s1 = zcds->ptr1;
	d1 = zcdd->ptr1;
	n1 = RTE_MIN(zcdd->n1, zcds->n1);
	if (zcds->n1 > n1) {
		n2 = zcds->n1 - n1;
		s2 = RTE_PTR_ADD(zcds->ptr1, sizeof(cnt_id_t) * n1);
		d2 = zcdd->ptr2;
		n3 = n - n1 - n2;
		s3 = zcds->ptr2;
		d3 = RTE_PTR_ADD(zcdd->ptr2, sizeof(cnt_id_t) * n2);
	} else {
		n2 = zcdd->n1 - n1;
		s2 = zcds->ptr2;
		d2 = RTE_PTR_ADD(zcdd->ptr1, sizeof(cnt_id_t) * n1);
		n3 = n - n1 - n2;
		s3 = RTE_PTR_ADD(zcds->ptr2, sizeof(cnt_id_t) * n2);
		d3 = zcdd->ptr2;
	}
	memcpy(d1, s1, n1 * sizeof(cnt_id_t));
	if (n2 != 0)
		memcpy(d2, s2, n2 * sizeof(cnt_id_t));
	if (n3 != 0)
		memcpy(d3, s3, n3 * sizeof(cnt_id_t));
}
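
/*
 * A sketch of the worst case handled by __hws_cnt_r2rcpy() above, with sizes
 * made up purely for illustration: copying n = 8 elements when the source
 * wraps after 5 elements (zcds->n1 = 5) and the destination wraps after 7
 * (zcdd->n1 = 7) results in three memcpy() calls: 5 elements from zcds->ptr1
 * to zcdd->ptr1, 2 elements from the wrapped source zcds->ptr2 to
 * zcdd->ptr1 + 5, and the last element from zcds->ptr2 + 2 to the wrapped
 * destination zcdd->ptr2.
 */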

static __rte_always_inline int
mlx5_hws_cnt_pool_cache_flush(struct mlx5_hws_cnt_pool *cpool,
			      uint32_t queue_id)
{
	unsigned int ret __rte_unused;
	struct rte_ring_zc_data zcdr = {0};
	struct rte_ring_zc_data zcdc = {0};
	struct rte_ring *reset_list = NULL;
	struct rte_ring *qcache = cpool->cache->qcache[queue_id];
	uint32_t ring_size = rte_ring_count(qcache);

	ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
						   ring_size, &zcdc, NULL);
	MLX5_ASSERT(ret == ring_size);
	reset_list = cpool->wait_reset_list;
	ret = rte_ring_enqueue_zc_burst_elem_start(reset_list, sizeof(cnt_id_t),
						   ring_size, &zcdr, NULL);
	MLX5_ASSERT(ret == ring_size);
	__hws_cnt_r2rcpy(&zcdr, &zcdc, ring_size);
	rte_ring_enqueue_zc_elem_finish(reset_list, ring_size);
	rte_ring_dequeue_zc_elem_finish(qcache, ring_size);
	return 0;
}

static __rte_always_inline int
mlx5_hws_cnt_pool_cache_fetch(struct mlx5_hws_cnt_pool *cpool,
			      uint32_t queue_id)
{
	struct rte_ring *qcache = cpool->cache->qcache[queue_id];
	struct rte_ring *free_list = NULL;
	struct rte_ring *reuse_list = NULL;
	struct rte_ring *list = NULL;
	struct rte_ring_zc_data zcdf = {0};
	struct rte_ring_zc_data zcdc = {0};
	struct rte_ring_zc_data zcdu = {0};
	struct rte_ring_zc_data zcds = {0};
	struct mlx5_hws_cnt_pool_caches *cache = cpool->cache;
	unsigned int ret, actual_fetch_size __rte_unused;

	reuse_list = cpool->reuse_list;
	ret = rte_ring_dequeue_zc_burst_elem_start(reuse_list,
			sizeof(cnt_id_t), cache->fetch_sz, &zcdu, NULL);
	zcds = zcdu;
	list = reuse_list;
	if (unlikely(ret == 0)) { /* no reuse counter. */
		rte_ring_dequeue_zc_elem_finish(reuse_list, 0);
		free_list = cpool->free_list;
		ret = rte_ring_dequeue_zc_burst_elem_start(free_list,
							   sizeof(cnt_id_t),
							   cache->fetch_sz,
							   &zcdf, NULL);
		zcds = zcdf;
		list = free_list;
		if (unlikely(ret == 0)) { /* no free counter. */
			rte_ring_dequeue_zc_elem_finish(free_list, 0);
			if (rte_ring_count(cpool->wait_reset_list))
				return -EAGAIN;
			return -ENOENT;
		}
	}
	actual_fetch_size = ret;
	ret = rte_ring_enqueue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
						   ret, &zcdc, NULL);
	MLX5_ASSERT(ret == actual_fetch_size);
	__hws_cnt_r2rcpy(&zcdc, &zcds, ret);
	rte_ring_dequeue_zc_elem_finish(list, ret);
	rte_ring_enqueue_zc_elem_finish(qcache, ret);
	return 0;
}

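/**
 * Rewind the producer head of a single-producer ring by @p n elements,
 * effectively un-enqueueing the last @p n entries and exposing them through
 * the zero-copy descriptor so the caller can move them to another ring
 * without an intermediate copy.
 *
 * @param r
 *   A pointer to the ring (both ends must use RTE_RING_SYNC_ST).
 * @param n
 *   Number of elements to revert.
 * @param zcd
 *   A pointer to zero-copy data describing the reverted area.
 * @return
 *   Number of reverted elements (always @p n).
 */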
static __rte_always_inline int
__mlx5_hws_cnt_pool_enqueue_revert(struct rte_ring *r, unsigned int n,
		struct rte_ring_zc_data *zcd)
{
	uint32_t current_head = 0;
	uint32_t revert2head = 0;

	MLX5_ASSERT(r->prod.sync_type == RTE_RING_SYNC_ST);
	MLX5_ASSERT(r->cons.sync_type == RTE_RING_SYNC_ST);
	current_head = rte_atomic_load_explicit(&r->prod.head, rte_memory_order_relaxed);
	MLX5_ASSERT(n <= r->capacity);
	MLX5_ASSERT(n <= rte_ring_count(r));
	revert2head = current_head - n;
	r->prod.head = revert2head; /* This ring should be SP. */
	__rte_ring_get_elem_addr(r, revert2head, sizeof(cnt_id_t), n,
			&zcd->ptr1, &zcd->n1, &zcd->ptr2);
	/* Update tail */
	rte_atomic_store_explicit(&r->prod.tail, revert2head, rte_memory_order_release);
	return n;
}

/**
 * Put one counter back into the pool.
 *
 * @param cpool
 *   A pointer to the counter pool structure.
 * @param queue
 *   A pointer to the HWS queue. If NULL, it means putting into the common pool.
 * @param cnt_id
 *   A counter id to be added.
 */
static __rte_always_inline void
mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
		      cnt_id_t *cnt_id)
{
	unsigned int ret = 0;
	struct mlx5_hws_cnt_pool *hpool;
	struct rte_ring_zc_data zcdc = {0};
	struct rte_ring_zc_data zcdr = {0};
	struct rte_ring *qcache = NULL;
	unsigned int wb_num = 0; /* cache write-back number. */
	uint32_t iidx;

	hpool = mlx5_hws_cnt_host_pool(cpool);
	iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
	hpool->pool[iidx].in_used = false;
	hpool->pool[iidx].query_gen_when_free =
		rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
	if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
		qcache = hpool->cache->qcache[*queue];
	if (unlikely(qcache == NULL)) {
		ret = rte_ring_enqueue_elem(hpool->wait_reset_list, cnt_id,
				sizeof(cnt_id_t));
		MLX5_ASSERT(ret == 0);
		return;
	}
	ret = rte_ring_enqueue_burst_elem(qcache, cnt_id, sizeof(cnt_id_t), 1,
					  NULL);
	if (unlikely(ret == 0)) { /* cache is full. */
		struct rte_ring *reset_list = cpool->wait_reset_list;

		wb_num = rte_ring_count(qcache) - cpool->cache->threshold;
		MLX5_ASSERT(wb_num < rte_ring_count(qcache));
		__mlx5_hws_cnt_pool_enqueue_revert(qcache, wb_num, &zcdc);
		ret = rte_ring_enqueue_zc_burst_elem_start(reset_list,
							   sizeof(cnt_id_t),
							   wb_num, &zcdr, NULL);
		MLX5_ASSERT(ret == wb_num);
		__hws_cnt_r2rcpy(&zcdr, &zcdc, ret);
		rte_ring_enqueue_zc_elem_finish(reset_list, ret);
		/* write-back THIS counter too */
		ret = rte_ring_enqueue_burst_elem(reset_list, cnt_id,
						  sizeof(cnt_id_t), 1, NULL);
	}
	MLX5_ASSERT(ret == 1);
}

/**
 * Get one counter from the pool.
 *
 * If @p queue is not NULL, objects will be retrieved first from the queue's
 * cache, subsequently from the common pool. Note that it can return -ENOENT
 * when the local cache and common pool are empty, even if the caches of
 * other queues are full.
 *
 * @param cpool
 *   A pointer to the counter pool structure.
 * @param queue
 *   A pointer to the HWS queue. If NULL, it means fetching from the common pool.
 * @param cnt_id
 *   A pointer to a cnt_id_t (counter id) that will be filled in.
 * @param age_idx
 *   Index of the AGE parameter using this counter, zero means there is no such AGE.
 *
 * @return
 *   - 0: Success; object taken.
 *   - -ENOENT: Not enough entries in the pool; no object is retrieved.
 *   - -EAGAIN: counter is not ready; try again.
 */
static __rte_always_inline int
mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
		      cnt_id_t *cnt_id, uint32_t age_idx)
{
	unsigned int ret;
	struct rte_ring_zc_data zcdc = {0};
	struct rte_ring *qcache = NULL;
	uint32_t iidx, query_gen = 0;
	cnt_id_t tmp_cid = 0;

	if (likely(queue != NULL && cpool->cfg.host_cpool == NULL))
		qcache = cpool->cache->qcache[*queue];
	if (unlikely(qcache == NULL)) {
		cpool = mlx5_hws_cnt_host_pool(cpool);
		ret = rte_ring_dequeue_elem(cpool->reuse_list, &tmp_cid,
				sizeof(cnt_id_t));
		if (unlikely(ret != 0)) {
			ret = rte_ring_dequeue_elem(cpool->free_list, &tmp_cid,
					sizeof(cnt_id_t));
			if (unlikely(ret != 0)) {
				if (rte_ring_count(cpool->wait_reset_list))
					return -EAGAIN;
				return -ENOENT;
			}
		}
		*cnt_id = tmp_cid;
		iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
		__hws_cnt_query_raw(cpool, *cnt_id,
				    &cpool->pool[iidx].reset.hits,
				    &cpool->pool[iidx].reset.bytes);
		cpool->pool[iidx].share = 0;
		MLX5_ASSERT(!cpool->pool[iidx].in_used);
		cpool->pool[iidx].in_used = true;
		cpool->pool[iidx].age_idx = age_idx;
		return 0;
	}
	ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,
						   &zcdc, NULL);
	if (unlikely(ret == 0)) { /* local cache is empty. */
		rte_ring_dequeue_zc_elem_finish(qcache, 0);
		/* let's fetch from global free list. */
		ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
		if (unlikely(ret != 0))
			return ret;
		ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
							   sizeof(cnt_id_t), 1,
							   &zcdc, NULL);
		MLX5_ASSERT(ret == 1);
	}
	/* get one from local cache. */
	*cnt_id = (*(cnt_id_t *)zcdc.ptr1);
	iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
	query_gen = cpool->pool[iidx].query_gen_when_free;
	if (cpool->query_gen == query_gen) { /* counter is waiting to reset. */
		rte_ring_dequeue_zc_elem_finish(qcache, 0);
		/* write-back counter to reset list. */
		mlx5_hws_cnt_pool_cache_flush(cpool, *queue);
		/* let's fetch from global free list. */
		ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
		if (unlikely(ret != 0))
			return ret;
		ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
							   sizeof(cnt_id_t), 1,
							   &zcdc, NULL);
		MLX5_ASSERT(ret == 1);
		*cnt_id = *(cnt_id_t *)zcdc.ptr1;
		iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
	}
	__hws_cnt_query_raw(cpool, *cnt_id, &cpool->pool[iidx].reset.hits,
			    &cpool->pool[iidx].reset.bytes);
	rte_ring_dequeue_zc_elem_finish(qcache, 1);
	cpool->pool[iidx].share = 0;
	MLX5_ASSERT(!cpool->pool[iidx].in_used);
	cpool->pool[iidx].in_used = true;
	cpool->pool[iidx].age_idx = age_idx;
	return 0;
}

/**
 * Decide if the given queue can be used to perform counter allocation/deallocation
 * based on the counter configuration.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] queue
 *   Pointer to the queue index.
 *
 * @return
 *   @p queue if the cache related to the queue can be used. NULL otherwise.
 */
static __rte_always_inline uint32_t *
mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue)
{
	if (priv && priv->hws_cpool) {
		/* Do not use queue cache if counter pool is shared. */
		if (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL)
			return NULL;
		/* Do not use queue cache if counter cache is disabled. */
		if (priv->hws_cpool->cache == NULL)
			return NULL;
		return queue;
	}
	/* This case should not be reached if counter pool was successfully configured. */
	MLX5_ASSERT(false);
	return NULL;
}
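
/*
 * A minimal usage sketch of the allocation path (illustrative only; the
 * surrounding flow-rule code and error handling are assumptions of this
 * example, not part of the API):
 *
 *	uint32_t queue_id = 0;
 *	uint32_t *queue = mlx5_hws_cnt_get_queue(priv, &queue_id);
 *	cnt_id_t cnt_id;
 *
 *	if (mlx5_hws_cnt_pool_get(priv->hws_cpool, queue, &cnt_id, 0) == 0) {
 *		... use the counter in a flow rule ...
 *		mlx5_hws_cnt_pool_put(priv->hws_cpool, queue, &cnt_id);
 *	}
 */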

static __rte_always_inline unsigned int
mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);

	return rte_ring_get_capacity(hpool->free_list);
}

static __rte_always_inline int
mlx5_hws_cnt_pool_get_action_offset(struct mlx5_hws_cnt_pool *cpool,
		cnt_id_t cnt_id, struct mlx5dr_action **action,
		uint32_t *offset)
{
	uint8_t idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;

	idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
	*action = cpool->dcs_mng.dcs[idx].dr_action;
	*offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
	return 0;
}

static __rte_always_inline int
mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
			uint32_t age_idx)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint32_t iidx;
	int ret;

	ret = mlx5_hws_cnt_pool_get(hpool, NULL, cnt_id, age_idx);
	if (ret != 0)
		return ret;
	iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
	hpool->pool[iidx].share = 1;
	return 0;
}

static __rte_always_inline void
mlx5_hws_cnt_shared_put(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);

	hpool->pool[iidx].share = 0;
	mlx5_hws_cnt_pool_put(hpool, NULL, cnt_id);
}

static __rte_always_inline bool
mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);

	return hpool->pool[iidx].share ? true : false;
}

static __rte_always_inline void
mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
		     uint32_t age_idx)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);

	MLX5_ASSERT(hpool->pool[iidx].share);
	hpool->pool[iidx].age_idx = age_idx;
}

static __rte_always_inline uint32_t
mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);

	MLX5_ASSERT(hpool->pool[iidx].share);
	return hpool->pool[iidx].age_idx;
}

static __rte_always_inline cnt_id_t
mlx5_hws_age_cnt_get(struct mlx5_priv *priv, struct mlx5_hws_age_param *param,
		     uint32_t age_idx)
{
	if (!param->own_cnt_index) {
		/* Create one indirect counter for internal usage. */
		if (mlx5_hws_cnt_shared_get(priv->hws_cpool,
					    &param->own_cnt_index, age_idx) < 0)
			return 0;
		param->nb_cnts++;
	}
	return param->own_cnt_index;
}

static __rte_always_inline void
mlx5_hws_age_nb_cnt_increase(struct mlx5_priv *priv, uint32_t age_idx)
{
	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);

	MLX5_ASSERT(param != NULL);
	param->nb_cnts++;
}

static __rte_always_inline void
mlx5_hws_age_nb_cnt_decrease(struct mlx5_priv *priv, uint32_t age_idx)
{
	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);

	if (param != NULL)
		param->nb_cnts--;
}

static __rte_always_inline bool
mlx5_hws_age_is_indirect(uint32_t age_idx)
{
	return (age_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
		MLX5_INDIRECT_ACTION_TYPE_AGE ? true : false;
}

/* init HWS counter pool. */
int
mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh);

void
mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh);

int
mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
		uint32_t nb_counters, uint16_t nb_queue,
		struct mlx5_hws_cnt_pool *chost, struct rte_flow_error *error);

void
mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
		struct mlx5_hws_cnt_pool *cpool);

int
mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh,
		      struct rte_flow_error *error);

void
mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh);

int
mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
			    struct rte_flow_error *error);

uint32_t
mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
			   bool shared, const struct rte_flow_action_age *age,
			   uint32_t flow_idx, struct rte_flow_error *error);

int
mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
			   const void *update, struct rte_flow_error *error);

void *
mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);

int
mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
		       uint32_t nb_aging_objects,
		       uint16_t nb_queues,
		       bool strict_queue);

void
mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);

#endif /* _MLX5_HWS_CNT_H_ */