xref: /dpdk/drivers/net/mlx5/mlx5_hws_cnt.c (revision c0e29968294c92ca15fdb34ce63fbba01c4562a6)
14d368e1dSXiaoyu Min /* SPDX-License-Identifier: BSD-3-Clause
24d368e1dSXiaoyu Min  * Copyright 2020 Mellanox Technologies, Ltd
34d368e1dSXiaoyu Min  */
44d368e1dSXiaoyu Min 
54d368e1dSXiaoyu Min #include <stdint.h>
64d368e1dSXiaoyu Min #include <rte_malloc.h>
74d368e1dSXiaoyu Min #include <mlx5_malloc.h>
84d368e1dSXiaoyu Min #include <rte_ring.h>
94d368e1dSXiaoyu Min #include <mlx5_devx_cmds.h>
104d368e1dSXiaoyu Min #include <rte_cycles.h>
1104a4de75SMichael Baum #include <rte_eal_paging.h>
12a7ba40b2SThomas Monjalon #include <rte_thread.h>
134d368e1dSXiaoyu Min 
144d368e1dSXiaoyu Min #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
154d368e1dSXiaoyu Min 
164d368e1dSXiaoyu Min #include "mlx5_utils.h"
174d368e1dSXiaoyu Min #include "mlx5_hws_cnt.h"
184d368e1dSXiaoyu Min 
194d368e1dSXiaoyu Min #define HWS_CNT_CACHE_SZ_DEFAULT 511
204d368e1dSXiaoyu Min #define HWS_CNT_CACHE_PRELOAD_DEFAULT 254
214d368e1dSXiaoyu Min #define HWS_CNT_CACHE_FETCH_DEFAULT 254
224d368e1dSXiaoyu Min #define HWS_CNT_CACHE_THRESHOLD_DEFAULT 254
234d368e1dSXiaoyu Min #define HWS_CNT_ALLOC_FACTOR_DEFAULT 20
244d368e1dSXiaoyu Min 
254d368e1dSXiaoyu Min static void
264d368e1dSXiaoyu Min __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool)
274d368e1dSXiaoyu Min {
284d368e1dSXiaoyu Min 	uint32_t cnt_num = mlx5_hws_cnt_pool_get_size(cpool);
2913ea6bdcSViacheslav Ovsiienko 	uint32_t iidx;
304d368e1dSXiaoyu Min 
314d368e1dSXiaoyu Min 	/*
324d368e1dSXiaoyu Min 	 * Counter ID order is important for tracking the maximum number of
334d368e1dSXiaoyu Min 	 * counters in use for querying, which means the counter internal index
344d368e1dSXiaoyu Min 	 * order must go from zero to the user-configured number, i.e. 0 - 8000000.
354d368e1dSXiaoyu Min 	 * Counter IDs must be loaded in this order into the cache first, and
364d368e1dSXiaoyu Min 	 * then into the global free list, so in the end the user fetches counters
374d368e1dSXiaoyu Min 	 * from the minimum to the maximum index (see the sketch after this function).
384d368e1dSXiaoyu Min 	 */
3913ea6bdcSViacheslav Ovsiienko 	for (iidx = 0; iidx < cnt_num; iidx++) {
4013ea6bdcSViacheslav Ovsiienko 		cnt_id_t cnt_id  = mlx5_hws_cnt_id_gen(cpool, iidx);
4113ea6bdcSViacheslav Ovsiienko 
424d368e1dSXiaoyu Min 		rte_ring_enqueue_elem(cpool->free_list, &cnt_id,
434d368e1dSXiaoyu Min 				sizeof(cnt_id));
444d368e1dSXiaoyu Min 	}
454d368e1dSXiaoyu Min }
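
/*
 * Illustrative sketch (not part of the driver logic): because the free list
 * above is populated in ascending internal-index order, a consumer dequeuing
 * counter IDs sees the lowest indexes first. Assuming a freshly loaded pool:
 *
 *	cnt_id_t id;
 *
 *	rte_ring_dequeue_elem(cpool->free_list, &id, sizeof(id));
 *
 * returns mlx5_hws_cnt_id_gen(cpool, 0), the next dequeue returns the ID for
 * internal index 1, and so on up to the configured pool size.
 */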
464d368e1dSXiaoyu Min 
474d368e1dSXiaoyu Min static void
484d368e1dSXiaoyu Min __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
494d368e1dSXiaoyu Min 		   struct mlx5_hws_cnt_pool *cpool)
504d368e1dSXiaoyu Min {
514d368e1dSXiaoyu Min 	struct rte_ring *reset_list = cpool->wait_reset_list;
524d368e1dSXiaoyu Min 	struct rte_ring *reuse_list = cpool->reuse_list;
534d368e1dSXiaoyu Min 	uint32_t reset_cnt_num;
544d368e1dSXiaoyu Min 	struct rte_ring_zc_data zcdr = {0};
554d368e1dSXiaoyu Min 	struct rte_ring_zc_data zcdu = {0};
56d37435dcSMichael Baum 	uint32_t ret __rte_unused;
574d368e1dSXiaoyu Min 
584d368e1dSXiaoyu Min 	reset_cnt_num = rte_ring_count(reset_list);
594d368e1dSXiaoyu Min 	cpool->query_gen++;
604d368e1dSXiaoyu Min 	mlx5_aso_cnt_query(sh, cpool);
614d368e1dSXiaoyu Min 	zcdr.n1 = 0;
624d368e1dSXiaoyu Min 	zcdu.n1 = 0;
63d37435dcSMichael Baum 	ret = rte_ring_enqueue_zc_burst_elem_start(reuse_list,
64d37435dcSMichael Baum 						   sizeof(cnt_id_t),
65d37435dcSMichael Baum 						   reset_cnt_num, &zcdu,
664d368e1dSXiaoyu Min 						   NULL);
67d37435dcSMichael Baum 	MLX5_ASSERT(ret == reset_cnt_num);
68d37435dcSMichael Baum 	ret = rte_ring_dequeue_zc_burst_elem_start(reset_list,
69d37435dcSMichael Baum 						   sizeof(cnt_id_t),
70d37435dcSMichael Baum 						   reset_cnt_num, &zcdr,
714d368e1dSXiaoyu Min 						   NULL);
72d37435dcSMichael Baum 	MLX5_ASSERT(ret == reset_cnt_num);
734d368e1dSXiaoyu Min 	__hws_cnt_r2rcpy(&zcdu, &zcdr, reset_cnt_num);
74d37435dcSMichael Baum 	rte_ring_dequeue_zc_elem_finish(reset_list, reset_cnt_num);
75d37435dcSMichael Baum 	rte_ring_enqueue_zc_elem_finish(reuse_list, reset_cnt_num);
76*c0e29968SDariusz Sosnowski 
77*c0e29968SDariusz Sosnowski 	if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
784d368e1dSXiaoyu Min 		reset_cnt_num = rte_ring_count(reset_list);
79*c0e29968SDariusz Sosnowski 		DRV_LOG(DEBUG, "ibdev %s cpool %p wait_reset_cnt=%" PRIu32,
80*c0e29968SDariusz Sosnowski 			       sh->ibdev_name, (void *)cpool, reset_cnt_num);
81*c0e29968SDariusz Sosnowski 	}
824d368e1dSXiaoyu Min }
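
/*
 * A minimal sketch of the zero-copy ring-to-ring move performed above
 * (assumes two rings "src" and "dst" created with element size
 * sizeof(cnt_id_t) and "n" elements known to fit; return value checks are
 * omitted for brevity):
 *
 *	struct rte_ring_zc_data zd = {0}, zs = {0};
 *	unsigned int n = rte_ring_count(src);
 *
 *	rte_ring_enqueue_zc_burst_elem_start(dst, sizeof(cnt_id_t), n, &zd, NULL);
 *	rte_ring_dequeue_zc_burst_elem_start(src, sizeof(cnt_id_t), n, &zs, NULL);
 *	__hws_cnt_r2rcpy(&zd, &zs, n);
 *	rte_ring_dequeue_zc_elem_finish(src, n);
 *	rte_ring_enqueue_zc_elem_finish(dst, n);
 *
 * The copy works directly on the ring memory, so the counter IDs waiting for
 * reset are moved to the reuse list without an intermediate buffer.
 */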
834d368e1dSXiaoyu Min 
8404a4de75SMichael Baum /**
8504a4de75SMichael Baum  * Release AGE parameter.
8604a4de75SMichael Baum  *
8704a4de75SMichael Baum  * @param priv
8804a4de75SMichael Baum  *   Pointer to the port private data structure.
8904a4de75SMichael Baum  * @param own_cnt_index
9004a4de75SMichael Baum  *   Counter ID created only for this AGE, to be released.
9104a4de75SMichael Baum  *   Zero means there is no such counter.
9204a4de75SMichael Baum  * @param age_ipool
9304a4de75SMichael Baum  *   Pointer to AGE parameter indexed pool.
9404a4de75SMichael Baum  * @param idx
9504a4de75SMichael Baum  *   Index of AGE parameter in the indexed pool.
9604a4de75SMichael Baum  */
9704a4de75SMichael Baum static void
9804a4de75SMichael Baum mlx5_hws_age_param_free(struct mlx5_priv *priv, cnt_id_t own_cnt_index,
9904a4de75SMichael Baum 			struct mlx5_indexed_pool *age_ipool, uint32_t idx)
10004a4de75SMichael Baum {
10104a4de75SMichael Baum 	if (own_cnt_index) {
10204a4de75SMichael Baum 		struct mlx5_hws_cnt_pool *cpool = priv->hws_cpool;
10304a4de75SMichael Baum 
10404a4de75SMichael Baum 		MLX5_ASSERT(mlx5_hws_cnt_is_shared(cpool, own_cnt_index));
10504a4de75SMichael Baum 		mlx5_hws_cnt_shared_put(cpool, &own_cnt_index);
10604a4de75SMichael Baum 	}
10704a4de75SMichael Baum 	mlx5_ipool_free(age_ipool, idx);
10804a4de75SMichael Baum }
10904a4de75SMichael Baum 
11004a4de75SMichael Baum /**
11104a4de75SMichael Baum  * Check for newly aged-out flows in the HWS counter pool and raise the aging event.
11204a4de75SMichael Baum  *
11304a4de75SMichael Baum  * @param[in] priv
11404a4de75SMichael Baum  *   Pointer to port private object.
11504a4de75SMichael Baum  * @param[in] cpool
11604a4de75SMichael Baum  *   Pointer to current counter pool.
11704a4de75SMichael Baum  */
11804a4de75SMichael Baum static void
11904a4de75SMichael Baum mlx5_hws_aging_check(struct mlx5_priv *priv, struct mlx5_hws_cnt_pool *cpool)
12004a4de75SMichael Baum {
12104a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
12204a4de75SMichael Baum 	struct flow_counter_stats *stats = cpool->raw_mng->raw;
12304a4de75SMichael Baum 	struct mlx5_hws_age_param *param;
12404a4de75SMichael Baum 	struct rte_ring *r;
12504a4de75SMichael Baum 	const uint64_t curr_time = MLX5_CURR_TIME_SEC;
12604a4de75SMichael Baum 	const uint32_t time_delta = curr_time - cpool->time_of_last_age_check;
12704a4de75SMichael Baum 	uint32_t nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(cpool);
12804a4de75SMichael Baum 	uint16_t expected1 = HWS_AGE_CANDIDATE;
12904a4de75SMichael Baum 	uint16_t expected2 = HWS_AGE_CANDIDATE_INSIDE_RING;
13004a4de75SMichael Baum 	uint32_t i;
13104a4de75SMichael Baum 
13204a4de75SMichael Baum 	cpool->time_of_last_age_check = curr_time;
13304a4de75SMichael Baum 	for (i = 0; i < nb_alloc_cnts; ++i) {
13404a4de75SMichael Baum 		uint32_t age_idx = cpool->pool[i].age_idx;
13504a4de75SMichael Baum 		uint64_t hits;
13604a4de75SMichael Baum 
13704a4de75SMichael Baum 		if (!cpool->pool[i].in_used || age_idx == 0)
13804a4de75SMichael Baum 			continue;
13904a4de75SMichael Baum 		param = mlx5_ipool_get(age_info->ages_ipool, age_idx);
14004a4de75SMichael Baum 		if (unlikely(param == NULL)) {
14104a4de75SMichael Baum 			/*
14204a4de75SMichael Baum 			 * When an AGE action uses an indirect counter, it is
14304a4de75SMichael Baum 			 * the user's responsibility not to use this indirect
14404a4de75SMichael Baum 			 * counter without this AGE.
14504a4de75SMichael Baum 			 * If this counter is used after the AGE was freed, the
14604a4de75SMichael Baum 			 * AGE index is invalid and using it here will cause a
14704a4de75SMichael Baum 			 * segmentation fault.
14804a4de75SMichael Baum 			 */
14904a4de75SMichael Baum 			DRV_LOG(WARNING,
15004a4de75SMichael Baum 				"Counter %u has lost its AGE, it is unused.", i);
15104a4de75SMichael Baum 			continue;
15204a4de75SMichael Baum 		}
15304a4de75SMichael Baum 		if (param->timeout == 0)
15404a4de75SMichael Baum 			continue;
155e12a0166STyler Retzlaff 		switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
15604a4de75SMichael Baum 		case HWS_AGE_AGED_OUT_NOT_REPORTED:
15704a4de75SMichael Baum 		case HWS_AGE_AGED_OUT_REPORTED:
15804a4de75SMichael Baum 			/* Already aged-out, no action is needed. */
15904a4de75SMichael Baum 			continue;
16004a4de75SMichael Baum 		case HWS_AGE_CANDIDATE:
16104a4de75SMichael Baum 		case HWS_AGE_CANDIDATE_INSIDE_RING:
16204a4de75SMichael Baum 			/* This AGE is a candidate to be aged-out, go to checking. */
16304a4de75SMichael Baum 			break;
16404a4de75SMichael Baum 		case HWS_AGE_FREE:
16504a4de75SMichael Baum 			/*
16604a4de75SMichael Baum 			 * An AGE parameter in the "FREE" state cannot be pointed
16704a4de75SMichael Baum 			 * to by any counter since the counter is destroyed first.
16804a4de75SMichael Baum 			 * Fall-through.
16904a4de75SMichael Baum 			 */
17004a4de75SMichael Baum 		default:
17104a4de75SMichael Baum 			MLX5_ASSERT(0);
17204a4de75SMichael Baum 			continue;
17304a4de75SMichael Baum 		}
17404a4de75SMichael Baum 		hits = rte_be_to_cpu_64(stats[i].hits);
17504a4de75SMichael Baum 		if (param->nb_cnts == 1) {
17604a4de75SMichael Baum 			if (hits != param->accumulator_last_hits) {
177e12a0166STyler Retzlaff 				rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
178e12a0166STyler Retzlaff 						 rte_memory_order_relaxed);
17904a4de75SMichael Baum 				param->accumulator_last_hits = hits;
18004a4de75SMichael Baum 				continue;
18104a4de75SMichael Baum 			}
18204a4de75SMichael Baum 		} else {
18304a4de75SMichael Baum 			param->accumulator_hits += hits;
18404a4de75SMichael Baum 			param->accumulator_cnt++;
18504a4de75SMichael Baum 			if (param->accumulator_cnt < param->nb_cnts)
18604a4de75SMichael Baum 				continue;
18704a4de75SMichael Baum 			param->accumulator_cnt = 0;
18804a4de75SMichael Baum 			if (param->accumulator_last_hits !=
18904a4de75SMichael Baum 						param->accumulator_hits) {
190e12a0166STyler Retzlaff 				rte_atomic_store_explicit(&param->sec_since_last_hit,
191e12a0166STyler Retzlaff 						 0, rte_memory_order_relaxed);
19204a4de75SMichael Baum 				param->accumulator_last_hits =
19304a4de75SMichael Baum 							param->accumulator_hits;
19404a4de75SMichael Baum 				param->accumulator_hits = 0;
19504a4de75SMichael Baum 				continue;
19604a4de75SMichael Baum 			}
19704a4de75SMichael Baum 			param->accumulator_hits = 0;
19804a4de75SMichael Baum 		}
199e12a0166STyler Retzlaff 		if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
200e12a0166STyler Retzlaff 				       rte_memory_order_relaxed) + time_delta <=
201e12a0166STyler Retzlaff 		   rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
20204a4de75SMichael Baum 			continue;
20304a4de75SMichael Baum 		/* Prepare the relevant ring for this AGE parameter */
20404a4de75SMichael Baum 		if (priv->hws_strict_queue)
20504a4de75SMichael Baum 			r = age_info->hw_q_age->aged_lists[param->queue_id];
20604a4de75SMichael Baum 		else
20704a4de75SMichael Baum 			r = age_info->hw_age.aged_list;
20804a4de75SMichael Baum 		/* Change the state atomically and insert it into the ring. */
209e12a0166STyler Retzlaff 		if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
21004a4de75SMichael Baum 						HWS_AGE_AGED_OUT_NOT_REPORTED,
211e12a0166STyler Retzlaff 						rte_memory_order_relaxed,
212e12a0166STyler Retzlaff 						rte_memory_order_relaxed)) {
21304a4de75SMichael Baum 			int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
21404a4de75SMichael Baum 							      sizeof(uint32_t),
21504a4de75SMichael Baum 							      1, NULL);
21604a4de75SMichael Baum 
21704a4de75SMichael Baum 			/*
21804a4de75SMichael Baum 			 * If the ring doesn't have enough room for this entry,
21904a4de75SMichael Baum 			 * put the state back so it is retried next second.
22004a4de75SMichael Baum 			 *
22104a4de75SMichael Baum 			 * FIXME: if it gets traffic before the next second, this
22204a4de75SMichael Baum 			 *        "aged out" event is lost; to be fixed later by
22304a4de75SMichael Baum 			 *        optimizing the code to fill the ring in bulk.
22404a4de75SMichael Baum 			 */
22504a4de75SMichael Baum 			expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
22604a4de75SMichael Baum 			if (ret == 0 &&
227e12a0166STyler Retzlaff 			    !rte_atomic_compare_exchange_strong_explicit(&param->state,
22804a4de75SMichael Baum 							 &expected2, expected1,
229e12a0166STyler Retzlaff 							 rte_memory_order_relaxed,
230e12a0166STyler Retzlaff 							 rte_memory_order_relaxed) &&
23104a4de75SMichael Baum 			    expected2 == HWS_AGE_FREE)
23204a4de75SMichael Baum 				mlx5_hws_age_param_free(priv,
23304a4de75SMichael Baum 							param->own_cnt_index,
23404a4de75SMichael Baum 							age_info->ages_ipool,
23504a4de75SMichael Baum 							age_idx);
23604a4de75SMichael Baum 			/* The event is irrelevant in strict queue mode. */
23704a4de75SMichael Baum 			if (!priv->hws_strict_queue)
23804a4de75SMichael Baum 				MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
23904a4de75SMichael Baum 		} else {
240e12a0166STyler Retzlaff 			rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
24104a4de75SMichael Baum 						  HWS_AGE_AGED_OUT_NOT_REPORTED,
242e12a0166STyler Retzlaff 						  rte_memory_order_relaxed,
243e12a0166STyler Retzlaff 						  rte_memory_order_relaxed);
24404a4de75SMichael Baum 		}
24504a4de75SMichael Baum 	}
24604a4de75SMichael Baum 	/* The event is irrelevant in strict queue mode. */
24704a4de75SMichael Baum 	if (!priv->hws_strict_queue)
24804a4de75SMichael Baum 		mlx5_age_event_prepare(priv->sh);
24904a4de75SMichael Baum }
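
/*
 * Aging arithmetic used in the loop above, as a worked example (numbers are
 * illustrative only): with the service cycle close to one second and an AGE
 * timeout of 5 seconds, sec_since_last_hit grows by time_delta (about 1) on
 * every check that sees no new hits:
 *
 *	sec_since_last_hit += time_delta;
 *	if (sec_since_last_hit > timeout)
 *		enqueue age_idx to the relevant aged-out ring;
 *
 * so the flow is reported once its accumulated idle time exceeds 5 seconds,
 * while any hit in between resets sec_since_last_hit back to zero.
 */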
25004a4de75SMichael Baum 
2514d368e1dSXiaoyu Min static void
2524d368e1dSXiaoyu Min mlx5_hws_cnt_raw_data_free(struct mlx5_dev_ctx_shared *sh,
2534d368e1dSXiaoyu Min 			   struct mlx5_hws_cnt_raw_data_mng *mng)
2544d368e1dSXiaoyu Min {
2554d368e1dSXiaoyu Min 	if (mng == NULL)
2564d368e1dSXiaoyu Min 		return;
2574d368e1dSXiaoyu Min 	sh->cdev->mr_scache.dereg_mr_cb(&mng->mr);
2584d368e1dSXiaoyu Min 	mlx5_free(mng->raw);
2594d368e1dSXiaoyu Min 	mlx5_free(mng);
2604d368e1dSXiaoyu Min }
2614d368e1dSXiaoyu Min 
2624d368e1dSXiaoyu Min __rte_unused
2634d368e1dSXiaoyu Min static struct mlx5_hws_cnt_raw_data_mng *
264d46f3b52SGregory Etelson mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n,
265d46f3b52SGregory Etelson 			    struct rte_flow_error *error)
2664d368e1dSXiaoyu Min {
2674d368e1dSXiaoyu Min 	struct mlx5_hws_cnt_raw_data_mng *mng = NULL;
2684d368e1dSXiaoyu Min 	int ret;
2694d368e1dSXiaoyu Min 	size_t sz = n * sizeof(struct flow_counter_stats);
27004a4de75SMichael Baum 	size_t pgsz = rte_mem_page_size();
2714d368e1dSXiaoyu Min 
27204a4de75SMichael Baum 	MLX5_ASSERT(pgsz > 0);
2734d368e1dSXiaoyu Min 	mng = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*mng), 0,
2744d368e1dSXiaoyu Min 			SOCKET_ID_ANY);
275d46f3b52SGregory Etelson 	if (mng == NULL) {
276d46f3b52SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
277d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
278d46f3b52SGregory Etelson 				   NULL, "failed to allocate counters memory manager");
2794d368e1dSXiaoyu Min 		goto error;
280d46f3b52SGregory Etelson 	}
28104a4de75SMichael Baum 	mng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, pgsz,
2824d368e1dSXiaoyu Min 			SOCKET_ID_ANY);
283d46f3b52SGregory Etelson 	if (mng->raw == NULL) {
284d46f3b52SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
285d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
286d46f3b52SGregory Etelson 				   NULL, "failed to allocate raw counters memory");
2874d368e1dSXiaoyu Min 		goto error;
288d46f3b52SGregory Etelson 	}
2894d368e1dSXiaoyu Min 	ret = sh->cdev->mr_scache.reg_mr_cb(sh->cdev->pd, mng->raw, sz,
2904d368e1dSXiaoyu Min 					    &mng->mr);
2914d368e1dSXiaoyu Min 	if (ret) {
292d46f3b52SGregory Etelson 		rte_flow_error_set(error, errno,
293d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
294d46f3b52SGregory Etelson 				   NULL, "failed to register counters memory region");
2954d368e1dSXiaoyu Min 		goto error;
2964d368e1dSXiaoyu Min 	}
2974d368e1dSXiaoyu Min 	return mng;
2984d368e1dSXiaoyu Min error:
2994d368e1dSXiaoyu Min 	mlx5_hws_cnt_raw_data_free(sh, mng);
3004d368e1dSXiaoyu Min 	return NULL;
3014d368e1dSXiaoyu Min }
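
/*
 * Layout note for the raw counter area allocated above:
 *
 *	sz = n * sizeof(struct flow_counter_stats)
 *
 * where "n" is the (4-aligned) pool size. The buffer is page-aligned so it
 * can be registered as a single memory region via reg_mr_cb(); the background
 * query (mlx5_aso_cnt_query()) fills it and mlx5_hws_aging_check() reads the
 * per-counter hit values from it.
 */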
3024d368e1dSXiaoyu Min 
303a7ba40b2SThomas Monjalon static uint32_t
3044d368e1dSXiaoyu Min mlx5_hws_cnt_svc(void *opaque)
3054d368e1dSXiaoyu Min {
3064d368e1dSXiaoyu Min 	struct mlx5_dev_ctx_shared *sh =
3074d368e1dSXiaoyu Min 		(struct mlx5_dev_ctx_shared *)opaque;
3084d368e1dSXiaoyu Min 	uint64_t interval =
3094d368e1dSXiaoyu Min 		(uint64_t)sh->cnt_svc->query_interval * (US_PER_S / MS_PER_S);
3106ac2104aSSuanming Mou 	struct mlx5_hws_cnt_pool *hws_cpool;
3114d368e1dSXiaoyu Min 	uint64_t start_cycle, query_cycle = 0;
3124d368e1dSXiaoyu Min 	uint64_t query_us;
3134d368e1dSXiaoyu Min 	uint64_t sleep_us;
3144d368e1dSXiaoyu Min 
3154d368e1dSXiaoyu Min 	while (sh->cnt_svc->svc_running != 0) {
3166ac2104aSSuanming Mou 		if (rte_spinlock_trylock(&sh->cpool_lock) == 0)
3176ac2104aSSuanming Mou 			continue;
3184d368e1dSXiaoyu Min 		start_cycle = rte_rdtsc();
3196ac2104aSSuanming Mou 		/* 200ms for 16M counters. */
3206ac2104aSSuanming Mou 		LIST_FOREACH(hws_cpool, &sh->hws_cpool_list, next) {
3216ac2104aSSuanming Mou 			struct mlx5_priv *opriv = hws_cpool->priv;
3226ac2104aSSuanming Mou 
3236ac2104aSSuanming Mou 			__mlx5_hws_cnt_svc(sh, hws_cpool);
32404a4de75SMichael Baum 			if (opriv->hws_age_req)
3256ac2104aSSuanming Mou 				mlx5_hws_aging_check(opriv, hws_cpool);
3264d368e1dSXiaoyu Min 		}
3274d368e1dSXiaoyu Min 		query_cycle = rte_rdtsc() - start_cycle;
3286ac2104aSSuanming Mou 		rte_spinlock_unlock(&sh->cpool_lock);
3294d368e1dSXiaoyu Min 		query_us = query_cycle / (rte_get_timer_hz() / US_PER_S);
3304d368e1dSXiaoyu Min 		sleep_us = interval - query_us;
331*c0e29968SDariusz Sosnowski 		DRV_LOG(DEBUG, "ibdev %s counter service thread: "
332*c0e29968SDariusz Sosnowski 			       "interval_us=%" PRIu64 " query_us=%" PRIu64 " "
333*c0e29968SDariusz Sosnowski 			       "sleep_us=%" PRIu64,
334*c0e29968SDariusz Sosnowski 			sh->ibdev_name, interval, query_us,
335*c0e29968SDariusz Sosnowski 			interval > query_us ? sleep_us : 0);
3364d368e1dSXiaoyu Min 		if (interval > query_us)
3374d368e1dSXiaoyu Min 			rte_delay_us_sleep(sleep_us);
3384d368e1dSXiaoyu Min 	}
339a7ba40b2SThomas Monjalon 	return 0;
3404d368e1dSXiaoyu Min }
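
/*
 * Sleep budget of the service loop above, as a worked example (numbers are
 * illustrative): with query_interval == 1000 ms the target cycle is
 *
 *	interval = 1000 * (US_PER_S / MS_PER_S) = 1000000 us
 *
 * so if one pass over all pools takes 200000 us (roughly the cost quoted for
 * 16M counters), the thread sleeps for the remaining 800000 us; whenever a
 * pass exceeds the interval, the sleep is skipped entirely.
 */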
3414d368e1dSXiaoyu Min 
34213ea6bdcSViacheslav Ovsiienko static void
34313ea6bdcSViacheslav Ovsiienko mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool * const cntp)
34413ea6bdcSViacheslav Ovsiienko {
34513ea6bdcSViacheslav Ovsiienko 	uint32_t qidx = 0;
34613ea6bdcSViacheslav Ovsiienko 	if (cntp == NULL)
34713ea6bdcSViacheslav Ovsiienko 		return;
34813ea6bdcSViacheslav Ovsiienko 	rte_ring_free(cntp->free_list);
34913ea6bdcSViacheslav Ovsiienko 	rte_ring_free(cntp->wait_reset_list);
35013ea6bdcSViacheslav Ovsiienko 	rte_ring_free(cntp->reuse_list);
35113ea6bdcSViacheslav Ovsiienko 	if (cntp->cache) {
35213ea6bdcSViacheslav Ovsiienko 		for (qidx = 0; qidx < cntp->cache->q_num; qidx++)
35313ea6bdcSViacheslav Ovsiienko 			rte_ring_free(cntp->cache->qcache[qidx]);
35413ea6bdcSViacheslav Ovsiienko 	}
35513ea6bdcSViacheslav Ovsiienko 	mlx5_free(cntp->cache);
35613ea6bdcSViacheslav Ovsiienko 	mlx5_free(cntp->raw_mng);
35713ea6bdcSViacheslav Ovsiienko 	mlx5_free(cntp->pool);
35813ea6bdcSViacheslav Ovsiienko 	mlx5_free(cntp);
35913ea6bdcSViacheslav Ovsiienko }
36013ea6bdcSViacheslav Ovsiienko 
361d755221bSDariusz Sosnowski static bool
362d755221bSDariusz Sosnowski mlx5_hws_cnt_should_enable_cache(const struct mlx5_hws_cnt_pool_cfg *pcfg,
363d755221bSDariusz Sosnowski 				 const struct mlx5_hws_cache_param *ccfg)
364d755221bSDariusz Sosnowski {
365d755221bSDariusz Sosnowski 	/*
366d755221bSDariusz Sosnowski 	 * Enable cache if and only if there are enough counters requested
367d755221bSDariusz Sosnowski 	 * to populate all of the caches.
368d755221bSDariusz Sosnowski 	 */
369d755221bSDariusz Sosnowski 	return pcfg->request_num >= ccfg->q_num * ccfg->size;
370d755221bSDariusz Sosnowski }
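
/*
 * Worked example for the cache-enable rule above, using the defaults from
 * this file: with the default cache size of 511 entries per queue, a port
 * configured with 4 queues needs at least 4 * 511 = 2044 requested counters
 * before per-queue caches are created; smaller pools use only the global
 * rings.
 */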
371d755221bSDariusz Sosnowski 
372d755221bSDariusz Sosnowski static struct mlx5_hws_cnt_pool_caches *
373d755221bSDariusz Sosnowski mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
374d755221bSDariusz Sosnowski 			const struct mlx5_hws_cache_param *ccfg)
375d755221bSDariusz Sosnowski {
376d755221bSDariusz Sosnowski 	struct mlx5_hws_cnt_pool_caches *cache;
377d755221bSDariusz Sosnowski 	char mz_name[RTE_MEMZONE_NAMESIZE];
378d755221bSDariusz Sosnowski 	uint32_t qidx;
379d755221bSDariusz Sosnowski 
380d755221bSDariusz Sosnowski 	/* If counter pool is big enough, setup the counter pool cache. */
381d755221bSDariusz Sosnowski 	cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
382d755221bSDariusz Sosnowski 			sizeof(*cache) +
383d755221bSDariusz Sosnowski 			sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0])
384d755221bSDariusz Sosnowski 				* ccfg->q_num, 0, SOCKET_ID_ANY);
385d755221bSDariusz Sosnowski 	if (cache == NULL)
386d755221bSDariusz Sosnowski 		return NULL;
387d755221bSDariusz Sosnowski 	/* Store the necessary cache parameters. */
388d755221bSDariusz Sosnowski 	cache->fetch_sz = ccfg->fetch_sz;
389d755221bSDariusz Sosnowski 	cache->preload_sz = ccfg->preload_sz;
390d755221bSDariusz Sosnowski 	cache->threshold = ccfg->threshold;
391d755221bSDariusz Sosnowski 	cache->q_num = ccfg->q_num;
392d755221bSDariusz Sosnowski 	for (qidx = 0; qidx < ccfg->q_num; qidx++) {
393d755221bSDariusz Sosnowski 		snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx);
394d755221bSDariusz Sosnowski 		cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size,
395d755221bSDariusz Sosnowski 				SOCKET_ID_ANY,
396d755221bSDariusz Sosnowski 				RING_F_SP_ENQ | RING_F_SC_DEQ |
397d755221bSDariusz Sosnowski 				RING_F_EXACT_SZ);
398d755221bSDariusz Sosnowski 		if (cache->qcache[qidx] == NULL)
399d755221bSDariusz Sosnowski 			goto error;
400d755221bSDariusz Sosnowski 	}
401d755221bSDariusz Sosnowski 	return cache;
402d755221bSDariusz Sosnowski 
403d755221bSDariusz Sosnowski error:
404d755221bSDariusz Sosnowski 	while (qidx--)
405d755221bSDariusz Sosnowski 		rte_ring_free(cache->qcache[qidx]);
406d755221bSDariusz Sosnowski 	mlx5_free(cache);
407d755221bSDariusz Sosnowski 	return NULL;
408d755221bSDariusz Sosnowski }
409d755221bSDariusz Sosnowski 
41013ea6bdcSViacheslav Ovsiienko static struct mlx5_hws_cnt_pool *
41104a4de75SMichael Baum mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
41204a4de75SMichael Baum 		       const struct mlx5_hws_cnt_pool_cfg *pcfg,
413d46f3b52SGregory Etelson 		       const struct mlx5_hws_cache_param *ccfg,
414d46f3b52SGregory Etelson 		       struct rte_flow_error *error)
4154d368e1dSXiaoyu Min {
4164d368e1dSXiaoyu Min 	char mz_name[RTE_MEMZONE_NAMESIZE];
4174d368e1dSXiaoyu Min 	struct mlx5_hws_cnt_pool *cntp;
4184d368e1dSXiaoyu Min 	uint64_t cnt_num = 0;
4194d368e1dSXiaoyu Min 
4204d368e1dSXiaoyu Min 	MLX5_ASSERT(pcfg);
4214d368e1dSXiaoyu Min 	MLX5_ASSERT(ccfg);
4224d368e1dSXiaoyu Min 	cntp = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*cntp), 0,
4234d368e1dSXiaoyu Min 			   SOCKET_ID_ANY);
424d46f3b52SGregory Etelson 	if (cntp == NULL) {
425d46f3b52SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
426d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
427d46f3b52SGregory Etelson 				   "failed to allocate counter pool context");
4284d368e1dSXiaoyu Min 		return NULL;
429d46f3b52SGregory Etelson 	}
4304d368e1dSXiaoyu Min 
4314d368e1dSXiaoyu Min 	cntp->cfg = *pcfg;
43213ea6bdcSViacheslav Ovsiienko 	if (cntp->cfg.host_cpool)
43313ea6bdcSViacheslav Ovsiienko 		return cntp;
43404a4de75SMichael Baum 	if (pcfg->request_num > sh->hws_max_nb_counters) {
43504a4de75SMichael Baum 		DRV_LOG(ERR, "Counter number %u "
43604a4de75SMichael Baum 			"is greater than the maximum supported (%u).",
43704a4de75SMichael Baum 			pcfg->request_num, sh->hws_max_nb_counters);
438d46f3b52SGregory Etelson 		rte_flow_error_set(error, EINVAL,
439d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
440d46f3b52SGregory Etelson 				   "requested counters number exceeds supported capacity");
44104a4de75SMichael Baum 		goto error;
44204a4de75SMichael Baum 	}
4434d368e1dSXiaoyu Min 	cnt_num = pcfg->request_num * (100 + pcfg->alloc_factor) / 100;
4444d368e1dSXiaoyu Min 	if (cnt_num > UINT32_MAX) {
4454d368e1dSXiaoyu Min 		DRV_LOG(ERR, "counter number %"PRIu64" is out of 32bit range",
4464d368e1dSXiaoyu Min 			cnt_num);
447d46f3b52SGregory Etelson 		rte_flow_error_set(error, EINVAL,
448d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
449d46f3b52SGregory Etelson 				   "counters number must fit in 32 bits");
4504d368e1dSXiaoyu Min 		goto error;
4514d368e1dSXiaoyu Min 	}
45204a4de75SMichael Baum 	/*
45304a4de75SMichael Baum 	 * When the requested counter number is supported but the allocation
45404a4de75SMichael Baum 	 * factor pushes it beyond the maximum, the factor is effectively reduced.
45504a4de75SMichael Baum 	 */
45604a4de75SMichael Baum 	cnt_num = RTE_MIN((uint32_t)cnt_num, sh->hws_max_nb_counters);
4574d368e1dSXiaoyu Min 	cntp->pool = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
45804a4de75SMichael Baum 				 sizeof(struct mlx5_hws_cnt) * cnt_num,
4594d368e1dSXiaoyu Min 				 0, SOCKET_ID_ANY);
460d46f3b52SGregory Etelson 	if (cntp->pool == NULL) {
461d46f3b52SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
462d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
463d46f3b52SGregory Etelson 				   "failed to allocate counter pool context");
4644d368e1dSXiaoyu Min 		goto error;
465d46f3b52SGregory Etelson 	}
4664d368e1dSXiaoyu Min 	snprintf(mz_name, sizeof(mz_name), "%s_F_RING", pcfg->name);
4674d368e1dSXiaoyu Min 	cntp->free_list = rte_ring_create_elem(mz_name, sizeof(cnt_id_t),
4684d368e1dSXiaoyu Min 				(uint32_t)cnt_num, SOCKET_ID_ANY,
46913ea6bdcSViacheslav Ovsiienko 				RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |
47013ea6bdcSViacheslav Ovsiienko 				RING_F_EXACT_SZ);
4714d368e1dSXiaoyu Min 	if (cntp->free_list == NULL) {
472d46f3b52SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
473d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
474d46f3b52SGregory Etelson 				   "failed to allocate free counters ring");
4754d368e1dSXiaoyu Min 		goto error;
4764d368e1dSXiaoyu Min 	}
4774d368e1dSXiaoyu Min 	snprintf(mz_name, sizeof(mz_name), "%s_R_RING", pcfg->name);
4784d368e1dSXiaoyu Min 	cntp->wait_reset_list = rte_ring_create_elem(mz_name, sizeof(cnt_id_t),
4794d368e1dSXiaoyu Min 			(uint32_t)cnt_num, SOCKET_ID_ANY,
4804d368e1dSXiaoyu Min 			RING_F_MP_HTS_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
4814d368e1dSXiaoyu Min 	if (cntp->wait_reset_list == NULL) {
482d46f3b52SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
483d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
484d46f3b52SGregory Etelson 				   "failed to allocate counters wait reset ring");
4854d368e1dSXiaoyu Min 		goto error;
4864d368e1dSXiaoyu Min 	}
4874d368e1dSXiaoyu Min 	snprintf(mz_name, sizeof(mz_name), "%s_U_RING", pcfg->name);
4884d368e1dSXiaoyu Min 	cntp->reuse_list = rte_ring_create_elem(mz_name, sizeof(cnt_id_t),
4894d368e1dSXiaoyu Min 			(uint32_t)cnt_num, SOCKET_ID_ANY,
49013ea6bdcSViacheslav Ovsiienko 			RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ | RING_F_EXACT_SZ);
4914d368e1dSXiaoyu Min 	if (cntp->reuse_list == NULL) {
492d46f3b52SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
493d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
494d46f3b52SGregory Etelson 				   "failed to allocate counters reuse ring");
4954d368e1dSXiaoyu Min 		goto error;
4964d368e1dSXiaoyu Min 	}
497d755221bSDariusz Sosnowski 	/* Allocate counter cache only if needed. */
498d755221bSDariusz Sosnowski 	if (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) {
499d755221bSDariusz Sosnowski 		cntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg);
500d46f3b52SGregory Etelson 		if (cntp->cache == NULL) {
501d46f3b52SGregory Etelson 			rte_flow_error_set(error, ENOMEM,
502d46f3b52SGregory Etelson 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
503d46f3b52SGregory Etelson 					   "failed to allocate counters cache");
5044d368e1dSXiaoyu Min 			goto error;
5054d368e1dSXiaoyu Min 		}
506d46f3b52SGregory Etelson 	}
50704a4de75SMichael Baum 	/* Initialize the time for aging-out calculation. */
50804a4de75SMichael Baum 	cntp->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5094d368e1dSXiaoyu Min 	return cntp;
5104d368e1dSXiaoyu Min error:
5114d368e1dSXiaoyu Min 	mlx5_hws_cnt_pool_deinit(cntp);
5124d368e1dSXiaoyu Min 	return NULL;
5134d368e1dSXiaoyu Min }
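
/*
 * Sizing sketch for mlx5_hws_cnt_pool_init(), with illustrative numbers:
 * using the default allocation factor of 20, a request for 1000 counters
 * becomes
 *
 *	cnt_num = 1000 * (100 + 20) / 100 = 1200
 *
 * capped at sh->hws_max_nb_counters, and the same cnt_num sizes both the
 * counter array and the free/wait-reset/reuse rings created above.
 */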
5144d368e1dSXiaoyu Min 
5154d368e1dSXiaoyu Min int
5164d368e1dSXiaoyu Min mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh)
5174d368e1dSXiaoyu Min {
518a7ba40b2SThomas Monjalon 	char name[RTE_THREAD_INTERNAL_NAME_SIZE];
519a7ba40b2SThomas Monjalon 	rte_thread_attr_t attr;
5204d368e1dSXiaoyu Min 	int ret;
5214d368e1dSXiaoyu Min 	uint32_t service_core = sh->cnt_svc->service_core;
5224d368e1dSXiaoyu Min 
523a7ba40b2SThomas Monjalon 	ret = rte_thread_attr_init(&attr);
524a7ba40b2SThomas Monjalon 	if (ret != 0)
525a7ba40b2SThomas Monjalon 		goto error;
526a7ba40b2SThomas Monjalon 	CPU_SET(service_core, &attr.cpuset);
5274d368e1dSXiaoyu Min 	sh->cnt_svc->svc_running = 1;
528a7ba40b2SThomas Monjalon 	ret = rte_thread_create(&sh->cnt_svc->service_thread,
529a7ba40b2SThomas Monjalon 			&attr, mlx5_hws_cnt_svc, sh);
530a7ba40b2SThomas Monjalon 	if (ret != 0)
531a7ba40b2SThomas Monjalon 		goto error;
532a7ba40b2SThomas Monjalon 	snprintf(name, sizeof(name), "mlx5-cn%d", service_core);
533a7ba40b2SThomas Monjalon 	rte_thread_set_prefixed_name(sh->cnt_svc->service_thread, name);
534a7ba40b2SThomas Monjalon 
5354d368e1dSXiaoyu Min 	return 0;
536a7ba40b2SThomas Monjalon error:
537a7ba40b2SThomas Monjalon 	DRV_LOG(ERR, "Failed to create HW steering's counter service thread.");
538a7ba40b2SThomas Monjalon 	return ret;
5394d368e1dSXiaoyu Min }
5404d368e1dSXiaoyu Min 
5414d368e1dSXiaoyu Min void
5424d368e1dSXiaoyu Min mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh)
5434d368e1dSXiaoyu Min {
544a7ba40b2SThomas Monjalon 	if (sh->cnt_svc->service_thread.opaque_id == 0)
5454d368e1dSXiaoyu Min 		return;
5464d368e1dSXiaoyu Min 	sh->cnt_svc->svc_running = 0;
547a7ba40b2SThomas Monjalon 	rte_thread_join(sh->cnt_svc->service_thread, NULL);
548a7ba40b2SThomas Monjalon 	sh->cnt_svc->service_thread.opaque_id = 0;
5494d368e1dSXiaoyu Min }
5504d368e1dSXiaoyu Min 
55113ea6bdcSViacheslav Ovsiienko static int
5524d368e1dSXiaoyu Min mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
553d46f3b52SGregory Etelson 			    struct mlx5_hws_cnt_pool *cpool,
554d46f3b52SGregory Etelson 			    struct rte_flow_error *error)
5554d368e1dSXiaoyu Min {
5564d368e1dSXiaoyu Min 	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
55704a4de75SMichael Baum 	uint32_t max_log_bulk_sz = sh->hws_max_log_bulk_sz;
5584d368e1dSXiaoyu Min 	uint32_t log_bulk_sz;
55904a4de75SMichael Baum 	uint32_t idx, alloc_candidate, alloced = 0;
5604d368e1dSXiaoyu Min 	unsigned int cnt_num = mlx5_hws_cnt_pool_get_size(cpool);
5614d368e1dSXiaoyu Min 	struct mlx5_devx_counter_attr attr = {0};
5624d368e1dSXiaoyu Min 	struct mlx5_devx_obj *dcs;
5634d368e1dSXiaoyu Min 
56413ea6bdcSViacheslav Ovsiienko 	MLX5_ASSERT(cpool->cfg.host_cpool == NULL);
565d46f3b52SGregory Etelson 	if (hca_attr->flow_counter_bulk_log_max_alloc == 0)
566d46f3b52SGregory Etelson 		return rte_flow_error_set(error, ENOTSUP,
567d46f3b52SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
568d46f3b52SGregory Etelson 					  NULL, "FW doesn't support bulk log max alloc");
5694d368e1dSXiaoyu Min 	cnt_num = RTE_ALIGN_CEIL(cnt_num, 4); /* Minimum of 4 counters per bulk. */
5704d368e1dSXiaoyu Min 	log_bulk_sz = RTE_MIN(max_log_bulk_sz, rte_log2_u32(cnt_num));
5714d368e1dSXiaoyu Min 	attr.pd = sh->cdev->pdn;
5724d368e1dSXiaoyu Min 	attr.pd_valid = 1;
5734d368e1dSXiaoyu Min 	attr.bulk_log_max_alloc = 1;
5744d368e1dSXiaoyu Min 	attr.flow_counter_bulk_log_size = log_bulk_sz;
5754d368e1dSXiaoyu Min 	idx = 0;
5764d368e1dSXiaoyu Min 	dcs = mlx5_devx_cmd_flow_counter_alloc_general(sh->cdev->ctx, &attr);
577d46f3b52SGregory Etelson 	if (dcs == NULL) {
578d46f3b52SGregory Etelson 		rte_flow_error_set(error, rte_errno,
579d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
580d46f3b52SGregory Etelson 				   NULL, "FW failed to allocate counters");
5814d368e1dSXiaoyu Min 		goto error;
582d46f3b52SGregory Etelson 	}
5834d368e1dSXiaoyu Min 	cpool->dcs_mng.dcs[idx].obj = dcs;
5844d368e1dSXiaoyu Min 	cpool->dcs_mng.dcs[idx].batch_sz = (1 << log_bulk_sz);
5854d368e1dSXiaoyu Min 	cpool->dcs_mng.batch_total++;
5864d368e1dSXiaoyu Min 	idx++;
5874d368e1dSXiaoyu Min 	cpool->dcs_mng.dcs[0].iidx = 0;
5884d368e1dSXiaoyu Min 	alloced = cpool->dcs_mng.dcs[0].batch_sz;
5894d368e1dSXiaoyu Min 	if (cnt_num > cpool->dcs_mng.dcs[0].batch_sz) {
59004a4de75SMichael Baum 		while (idx < MLX5_HWS_CNT_DCS_NUM) {
5914d368e1dSXiaoyu Min 			attr.flow_counter_bulk_log_size = --max_log_bulk_sz;
59204a4de75SMichael Baum 			alloc_candidate = RTE_BIT32(max_log_bulk_sz);
59304a4de75SMichael Baum 			if (alloced + alloc_candidate > sh->hws_max_nb_counters)
59404a4de75SMichael Baum 				continue;
5954d368e1dSXiaoyu Min 			dcs = mlx5_devx_cmd_flow_counter_alloc_general
5964d368e1dSXiaoyu Min 				(sh->cdev->ctx, &attr);
597d46f3b52SGregory Etelson 			if (dcs == NULL) {
598d46f3b52SGregory Etelson 				rte_flow_error_set(error, rte_errno,
599d46f3b52SGregory Etelson 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
600d46f3b52SGregory Etelson 						   NULL, "FW failed to allocate counters");
6014d368e1dSXiaoyu Min 				goto error;
602d46f3b52SGregory Etelson 			}
6034d368e1dSXiaoyu Min 			cpool->dcs_mng.dcs[idx].obj = dcs;
60404a4de75SMichael Baum 			cpool->dcs_mng.dcs[idx].batch_sz = alloc_candidate;
6054d368e1dSXiaoyu Min 			cpool->dcs_mng.dcs[idx].iidx = alloced;
6064d368e1dSXiaoyu Min 			alloced += cpool->dcs_mng.dcs[idx].batch_sz;
6074d368e1dSXiaoyu Min 			cpool->dcs_mng.batch_total++;
60804a4de75SMichael Baum 			if (alloced >= cnt_num)
60904a4de75SMichael Baum 				break;
61004a4de75SMichael Baum 			idx++;
6114d368e1dSXiaoyu Min 		}
6124d368e1dSXiaoyu Min 	}
6134d368e1dSXiaoyu Min 	return 0;
6144d368e1dSXiaoyu Min error:
6154d368e1dSXiaoyu Min 	DRV_LOG(DEBUG,
6164d368e1dSXiaoyu Min 		"Cannot alloc device counter, allocated[%" PRIu32 "] request[%" PRIu32 "]",
6174d368e1dSXiaoyu Min 		alloced, cnt_num);
6184d368e1dSXiaoyu Min 	for (idx = 0; idx < cpool->dcs_mng.batch_total; idx++) {
6194d368e1dSXiaoyu Min 		mlx5_devx_cmd_destroy(cpool->dcs_mng.dcs[idx].obj);
6204d368e1dSXiaoyu Min 		cpool->dcs_mng.dcs[idx].obj = NULL;
6214d368e1dSXiaoyu Min 		cpool->dcs_mng.dcs[idx].batch_sz = 0;
6224d368e1dSXiaoyu Min 		cpool->dcs_mng.dcs[idx].iidx = 0;
6234d368e1dSXiaoyu Min 	}
6244d368e1dSXiaoyu Min 	cpool->dcs_mng.batch_total = 0;
6254d368e1dSXiaoyu Min 	return -1;
6264d368e1dSXiaoyu Min }
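
/*
 * Bulk sizing sketch for mlx5_hws_cnt_pool_dcs_alloc(), with illustrative
 * numbers (assuming sh->hws_max_log_bulk_sz == 12 and that the totals stay
 * within sh->hws_max_nb_counters): for a pool of 6000 counters the first
 * DevX object uses
 *
 *	log_bulk_sz = min(12, log2_roundup(6000)) = 12, i.e. 4096 counters,
 *
 * which does not cover the pool, so the loop allocates the next smaller
 * power-of-two batch (2048), reaching 6144 >= 6000 and stopping; at most
 * MLX5_HWS_CNT_DCS_NUM objects are used, each recording its starting
 * internal index in "iidx".
 */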
6274d368e1dSXiaoyu Min 
62813ea6bdcSViacheslav Ovsiienko static void
6294d368e1dSXiaoyu Min mlx5_hws_cnt_pool_dcs_free(struct mlx5_dev_ctx_shared *sh,
6304d368e1dSXiaoyu Min 			   struct mlx5_hws_cnt_pool *cpool)
6314d368e1dSXiaoyu Min {
6324d368e1dSXiaoyu Min 	uint32_t idx;
6334d368e1dSXiaoyu Min 
6344d368e1dSXiaoyu Min 	if (cpool == NULL)
6354d368e1dSXiaoyu Min 		return;
6364d368e1dSXiaoyu Min 	for (idx = 0; idx < MLX5_HWS_CNT_DCS_NUM; idx++)
6374d368e1dSXiaoyu Min 		mlx5_devx_cmd_destroy(cpool->dcs_mng.dcs[idx].obj);
6384d368e1dSXiaoyu Min 	if (cpool->raw_mng) {
6394d368e1dSXiaoyu Min 		mlx5_hws_cnt_raw_data_free(sh, cpool->raw_mng);
6404d368e1dSXiaoyu Min 		cpool->raw_mng = NULL;
6414d368e1dSXiaoyu Min 	}
6424d368e1dSXiaoyu Min }
6434d368e1dSXiaoyu Min 
64413ea6bdcSViacheslav Ovsiienko static void
64513ea6bdcSViacheslav Ovsiienko mlx5_hws_cnt_pool_action_destroy(struct mlx5_hws_cnt_pool *cpool)
64613ea6bdcSViacheslav Ovsiienko {
64713ea6bdcSViacheslav Ovsiienko 	uint32_t idx;
64813ea6bdcSViacheslav Ovsiienko 
64913ea6bdcSViacheslav Ovsiienko 	for (idx = 0; idx < cpool->dcs_mng.batch_total; idx++) {
65013ea6bdcSViacheslav Ovsiienko 		struct mlx5_hws_cnt_dcs *dcs = &cpool->dcs_mng.dcs[idx];
65113ea6bdcSViacheslav Ovsiienko 
65213ea6bdcSViacheslav Ovsiienko 		if (dcs->dr_action != NULL) {
65313ea6bdcSViacheslav Ovsiienko 			mlx5dr_action_destroy(dcs->dr_action);
65413ea6bdcSViacheslav Ovsiienko 			dcs->dr_action = NULL;
65513ea6bdcSViacheslav Ovsiienko 		}
65613ea6bdcSViacheslav Ovsiienko 	}
65713ea6bdcSViacheslav Ovsiienko }
65813ea6bdcSViacheslav Ovsiienko 
65913ea6bdcSViacheslav Ovsiienko static int
6604d368e1dSXiaoyu Min mlx5_hws_cnt_pool_action_create(struct mlx5_priv *priv,
6614d368e1dSXiaoyu Min 		struct mlx5_hws_cnt_pool *cpool)
6624d368e1dSXiaoyu Min {
66313ea6bdcSViacheslav Ovsiienko 	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
6644d368e1dSXiaoyu Min 	uint32_t idx;
6654d368e1dSXiaoyu Min 	int ret = 0;
6664d368e1dSXiaoyu Min 	uint32_t flags;
6674d368e1dSXiaoyu Min 
6684d368e1dSXiaoyu Min 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
6694d368e1dSXiaoyu Min 	if (priv->sh->config.dv_esw_en && priv->master)
6704d368e1dSXiaoyu Min 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
67113ea6bdcSViacheslav Ovsiienko 	for (idx = 0; idx < hpool->dcs_mng.batch_total; idx++) {
67213ea6bdcSViacheslav Ovsiienko 		struct mlx5_hws_cnt_dcs *hdcs = &hpool->dcs_mng.dcs[idx];
67313ea6bdcSViacheslav Ovsiienko 		struct mlx5_hws_cnt_dcs *dcs = &cpool->dcs_mng.dcs[idx];
67413ea6bdcSViacheslav Ovsiienko 
6754d368e1dSXiaoyu Min 		dcs->dr_action = mlx5dr_action_create_counter(priv->dr_ctx,
67613ea6bdcSViacheslav Ovsiienko 					(struct mlx5dr_devx_obj *)hdcs->obj,
6774d368e1dSXiaoyu Min 					flags);
6784d368e1dSXiaoyu Min 		if (dcs->dr_action == NULL) {
6794d368e1dSXiaoyu Min 			mlx5_hws_cnt_pool_action_destroy(cpool);
6804d368e1dSXiaoyu Min 			ret = -ENOSYS;
6814d368e1dSXiaoyu Min 			break;
6824d368e1dSXiaoyu Min 		}
6834d368e1dSXiaoyu Min 	}
6844d368e1dSXiaoyu Min 	return ret;
6854d368e1dSXiaoyu Min }
6864d368e1dSXiaoyu Min 
687e1c83d29SMaayan Kashani int
6884d368e1dSXiaoyu Min mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
689e1c83d29SMaayan Kashani 			 uint32_t nb_counters, uint16_t nb_queue,
690d46f3b52SGregory Etelson 			 struct mlx5_hws_cnt_pool *chost,
691d46f3b52SGregory Etelson 			 struct rte_flow_error *error)
6924d368e1dSXiaoyu Min {
6934d368e1dSXiaoyu Min 	struct mlx5_hws_cnt_pool *cpool = NULL;
6944d368e1dSXiaoyu Min 	struct mlx5_priv *priv = dev->data->dev_private;
6954d368e1dSXiaoyu Min 	struct mlx5_hws_cache_param cparam = {0};
6964d368e1dSXiaoyu Min 	struct mlx5_hws_cnt_pool_cfg pcfg = {0};
6974d368e1dSXiaoyu Min 	char *mp_name;
698d46f3b52SGregory Etelson 	int ret = 0;
6994d368e1dSXiaoyu Min 	size_t sz;
7004d368e1dSXiaoyu Min 
7011c955e4cSBing Zhao 	mp_name = mlx5_malloc(MLX5_MEM_ZERO, RTE_MEMZONE_NAMESIZE, 0, SOCKET_ID_ANY);
70213ea6bdcSViacheslav Ovsiienko 	if (mp_name == NULL)
70313ea6bdcSViacheslav Ovsiienko 		goto error;
7041c955e4cSBing Zhao 	snprintf(mp_name, RTE_MEMZONE_NAMESIZE, "MLX5_HWS_CNT_P_%x", dev->data->port_id);
70513ea6bdcSViacheslav Ovsiienko 	pcfg.name = mp_name;
706e1c83d29SMaayan Kashani 	pcfg.request_num = nb_counters;
70713ea6bdcSViacheslav Ovsiienko 	pcfg.alloc_factor = HWS_CNT_ALLOC_FACTOR_DEFAULT;
708e1c83d29SMaayan Kashani 	if (chost) {
70913ea6bdcSViacheslav Ovsiienko 		pcfg.host_cpool = chost;
710d46f3b52SGregory Etelson 		cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam, error);
71113ea6bdcSViacheslav Ovsiienko 		if (cpool == NULL)
71213ea6bdcSViacheslav Ovsiienko 			goto error;
71313ea6bdcSViacheslav Ovsiienko 		ret = mlx5_hws_cnt_pool_action_create(priv, cpool);
714d46f3b52SGregory Etelson 		if (ret != 0) {
715d46f3b52SGregory Etelson 			rte_flow_error_set(error, -ret,
716d46f3b52SGregory Etelson 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
717d46f3b52SGregory Etelson 					   NULL, "failed to allocate counter actions on guest port");
71813ea6bdcSViacheslav Ovsiienko 			goto error;
719d46f3b52SGregory Etelson 		}
720e1c83d29SMaayan Kashani 		goto success;
72130ff1d25SViacheslav Ovsiienko 	}
7224d368e1dSXiaoyu Min 	/* Initialize the counter service if not done yet. */
7234d368e1dSXiaoyu Min 	if (priv->sh->cnt_svc == NULL) {
724d46f3b52SGregory Etelson 		ret = mlx5_hws_cnt_svc_init(priv->sh, error);
725e1c83d29SMaayan Kashani 		if (ret)
726e1c83d29SMaayan Kashani 			return ret;
7274d368e1dSXiaoyu Min 	}
7284d368e1dSXiaoyu Min 	cparam.fetch_sz = HWS_CNT_CACHE_FETCH_DEFAULT;
7294d368e1dSXiaoyu Min 	cparam.preload_sz = HWS_CNT_CACHE_PRELOAD_DEFAULT;
7304d368e1dSXiaoyu Min 	cparam.q_num = nb_queue;
7314d368e1dSXiaoyu Min 	cparam.threshold = HWS_CNT_CACHE_THRESHOLD_DEFAULT;
7324d368e1dSXiaoyu Min 	cparam.size = HWS_CNT_CACHE_SZ_DEFAULT;
733d46f3b52SGregory Etelson 	cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam, error);
7344d368e1dSXiaoyu Min 	if (cpool == NULL)
7354d368e1dSXiaoyu Min 		goto error;
736d46f3b52SGregory Etelson 	ret = mlx5_hws_cnt_pool_dcs_alloc(priv->sh, cpool, error);
7374d368e1dSXiaoyu Min 	if (ret != 0)
7384d368e1dSXiaoyu Min 		goto error;
7394d368e1dSXiaoyu Min 	sz = RTE_ALIGN_CEIL(mlx5_hws_cnt_pool_get_size(cpool), 4);
740d46f3b52SGregory Etelson 	cpool->raw_mng = mlx5_hws_cnt_raw_data_alloc(priv->sh, sz, error);
7414d368e1dSXiaoyu Min 	if (cpool->raw_mng == NULL)
7424d368e1dSXiaoyu Min 		goto error;
7434d368e1dSXiaoyu Min 	__hws_cnt_id_load(cpool);
7444d368e1dSXiaoyu Min 	/*
7454d368e1dSXiaoyu Min 	 * Bump the query generation right after pool creation so the
7464d368e1dSXiaoyu Min 	 * pre-loaded counters can be used directly: they already have an
7474d368e1dSXiaoyu Min 	 * initial value, so there is no need to wait for a query.
7494d368e1dSXiaoyu Min 	 */
7504d368e1dSXiaoyu Min 	cpool->query_gen = 1;
7514d368e1dSXiaoyu Min 	ret = mlx5_hws_cnt_pool_action_create(priv, cpool);
752d46f3b52SGregory Etelson 	if (ret != 0) {
753d46f3b52SGregory Etelson 		rte_flow_error_set(error, -ret,
754d46f3b52SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
755d46f3b52SGregory Etelson 				   NULL, "failed to allocate counter actions");
7564d368e1dSXiaoyu Min 		goto error;
757d46f3b52SGregory Etelson 	}
7584d368e1dSXiaoyu Min 	priv->sh->cnt_svc->refcnt++;
7596ac2104aSSuanming Mou 	cpool->priv = priv;
7606ac2104aSSuanming Mou 	rte_spinlock_lock(&priv->sh->cpool_lock);
7616ac2104aSSuanming Mou 	LIST_INSERT_HEAD(&priv->sh->hws_cpool_list, cpool, next);
7626ac2104aSSuanming Mou 	rte_spinlock_unlock(&priv->sh->cpool_lock);
763e1c83d29SMaayan Kashani success:
764e1c83d29SMaayan Kashani 	priv->hws_cpool = cpool;
765e1c83d29SMaayan Kashani 	return 0;
7664d368e1dSXiaoyu Min error:
767d46f3b52SGregory Etelson 	MLX5_ASSERT(ret);
7684d368e1dSXiaoyu Min 	mlx5_hws_cnt_pool_destroy(priv->sh, cpool);
769e1c83d29SMaayan Kashani 	priv->hws_cpool = NULL;
770e1c83d29SMaayan Kashani 	return ret;
7714d368e1dSXiaoyu Min }
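
/*
 * A minimal caller-side sketch (error handling shortened; "dev", "nb_counters"
 * and "nb_queue" are assumed to come from the port configuration path):
 *
 *	struct rte_flow_error err;
 *	int rc;
 *
 *	rc = mlx5_hws_cnt_pool_create(dev, nb_counters, nb_queue, NULL, &err);
 *	if (rc != 0)
 *		DRV_LOG(ERR, "counter pool creation failed: %s",
 *			err.message ? err.message : "(no message)");
 *
 * Passing a non-NULL "chost" instead turns this port into a guest of the host
 * port's pool: only the DR counter actions are created here and the host
 * pool's counters are reused.
 */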
7724d368e1dSXiaoyu Min 
7734d368e1dSXiaoyu Min void
7744d368e1dSXiaoyu Min mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
7754d368e1dSXiaoyu Min 		struct mlx5_hws_cnt_pool *cpool)
7764d368e1dSXiaoyu Min {
7774d368e1dSXiaoyu Min 	if (cpool == NULL)
7784d368e1dSXiaoyu Min 		return;
7796ac2104aSSuanming Mou 	/*
7806ac2104aSSuanming Mou 	 * Querying 16M counters takes 200ms to finish.
7816ac2104aSSuanming Mou 	 * This may block here for at most 200ms.
7826ac2104aSSuanming Mou 	 */
7836ac2104aSSuanming Mou 	rte_spinlock_lock(&sh->cpool_lock);
7843331d595SMaayan Kashani 	/* Removing cpool before it was added to the list caused a segfault, so check first. */
7853331d595SMaayan Kashani 	if (!LIST_EMPTY(&sh->hws_cpool_list) && cpool->next.le_prev)
7866ac2104aSSuanming Mou 		LIST_REMOVE(cpool, next);
7876ac2104aSSuanming Mou 	rte_spinlock_unlock(&sh->cpool_lock);
78813ea6bdcSViacheslav Ovsiienko 	if (cpool->cfg.host_cpool == NULL) {
7894d368e1dSXiaoyu Min 		if (--sh->cnt_svc->refcnt == 0)
7904d368e1dSXiaoyu Min 			mlx5_hws_cnt_svc_deinit(sh);
79113ea6bdcSViacheslav Ovsiienko 	}
7924d368e1dSXiaoyu Min 	mlx5_hws_cnt_pool_action_destroy(cpool);
79313ea6bdcSViacheslav Ovsiienko 	if (cpool->cfg.host_cpool == NULL) {
7944d368e1dSXiaoyu Min 		mlx5_hws_cnt_pool_dcs_free(sh, cpool);
7954d368e1dSXiaoyu Min 		mlx5_hws_cnt_raw_data_free(sh, cpool->raw_mng);
79613ea6bdcSViacheslav Ovsiienko 	}
7974d368e1dSXiaoyu Min 	mlx5_free((void *)cpool->cfg.name);
7984d368e1dSXiaoyu Min 	mlx5_hws_cnt_pool_deinit(cpool);
7994d368e1dSXiaoyu Min }
8004d368e1dSXiaoyu Min 
8014d368e1dSXiaoyu Min int
802d46f3b52SGregory Etelson mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh,
803d46f3b52SGregory Etelson 		      struct rte_flow_error *error)
8044d368e1dSXiaoyu Min {
8054d368e1dSXiaoyu Min 	int ret;
8064d368e1dSXiaoyu Min 
8074d368e1dSXiaoyu Min 	sh->cnt_svc = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
8084d368e1dSXiaoyu Min 			sizeof(*sh->cnt_svc), 0, SOCKET_ID_ANY);
8094d368e1dSXiaoyu Min 	if (sh->cnt_svc == NULL)
810d46f3b52SGregory Etelson 		goto err;
8114d368e1dSXiaoyu Min 	sh->cnt_svc->query_interval = sh->config.cnt_svc.cycle_time;
8124d368e1dSXiaoyu Min 	sh->cnt_svc->service_core = sh->config.cnt_svc.service_core;
8134d368e1dSXiaoyu Min 	ret = mlx5_aso_cnt_queue_init(sh);
8144d368e1dSXiaoyu Min 	if (ret != 0) {
8154d368e1dSXiaoyu Min 		mlx5_free(sh->cnt_svc);
8164d368e1dSXiaoyu Min 		sh->cnt_svc = NULL;
817d46f3b52SGregory Etelson 		goto err;
8184d368e1dSXiaoyu Min 	}
8194d368e1dSXiaoyu Min 	ret = mlx5_hws_cnt_service_thread_create(sh);
8204d368e1dSXiaoyu Min 	if (ret != 0) {
8214d368e1dSXiaoyu Min 		mlx5_aso_cnt_queue_uninit(sh);
8224d368e1dSXiaoyu Min 		mlx5_free(sh->cnt_svc);
8234d368e1dSXiaoyu Min 		sh->cnt_svc = NULL;
8244d368e1dSXiaoyu Min 	}
8254d368e1dSXiaoyu Min 	return 0;
826d46f3b52SGregory Etelson err:
827d46f3b52SGregory Etelson 	return rte_flow_error_set(error, ENOMEM,
828d46f3b52SGregory Etelson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
829d46f3b52SGregory Etelson 				  NULL, "failed to init counters service");
830d46f3b52SGregory Etelson 
8314d368e1dSXiaoyu Min }
8324d368e1dSXiaoyu Min 
8334d368e1dSXiaoyu Min void
8344d368e1dSXiaoyu Min mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh)
8354d368e1dSXiaoyu Min {
8364d368e1dSXiaoyu Min 	if (sh->cnt_svc == NULL)
8374d368e1dSXiaoyu Min 		return;
8384d368e1dSXiaoyu Min 	mlx5_hws_cnt_service_thread_destroy(sh);
8394d368e1dSXiaoyu Min 	mlx5_aso_cnt_queue_uninit(sh);
8404d368e1dSXiaoyu Min 	mlx5_free(sh->cnt_svc);
8414d368e1dSXiaoyu Min 	sh->cnt_svc = NULL;
8424d368e1dSXiaoyu Min }
8434d368e1dSXiaoyu Min 
84404a4de75SMichael Baum /**
84504a4de75SMichael Baum  * Destroy AGE action.
84604a4de75SMichael Baum  *
84704a4de75SMichael Baum  * @param priv
84804a4de75SMichael Baum  *   Pointer to the port private data structure.
84904a4de75SMichael Baum  * @param idx
85004a4de75SMichael Baum  *   Index of AGE parameter.
85104a4de75SMichael Baum  * @param error
85204a4de75SMichael Baum  *   Pointer to error structure.
85304a4de75SMichael Baum  *
85404a4de75SMichael Baum  * @return
85504a4de75SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
85604a4de75SMichael Baum  */
85704a4de75SMichael Baum int
85804a4de75SMichael Baum mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
85904a4de75SMichael Baum 			    struct rte_flow_error *error)
86004a4de75SMichael Baum {
86104a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
86204a4de75SMichael Baum 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
86304a4de75SMichael Baum 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
86404a4de75SMichael Baum 
86504a4de75SMichael Baum 	if (param == NULL)
86604a4de75SMichael Baum 		return rte_flow_error_set(error, EINVAL,
86704a4de75SMichael Baum 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
86804a4de75SMichael Baum 					  "invalid AGE parameter index");
869e12a0166STyler Retzlaff 	switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
870e12a0166STyler Retzlaff 				    rte_memory_order_relaxed)) {
87104a4de75SMichael Baum 	case HWS_AGE_CANDIDATE:
87204a4de75SMichael Baum 	case HWS_AGE_AGED_OUT_REPORTED:
87304a4de75SMichael Baum 		mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
87404a4de75SMichael Baum 		break;
87504a4de75SMichael Baum 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
87604a4de75SMichael Baum 	case HWS_AGE_CANDIDATE_INSIDE_RING:
87704a4de75SMichael Baum 		/*
87804a4de75SMichael Baum 		 * In both cases AGE is inside the ring. Change the state here
87904a4de75SMichael Baum 		 * and destroy it later when it is taken out of ring.
88004a4de75SMichael Baum 		 */
88104a4de75SMichael Baum 		break;
88204a4de75SMichael Baum 	case HWS_AGE_FREE:
88304a4de75SMichael Baum 		/*
88404a4de75SMichael Baum 		 * If index is valid and state is FREE, it says this AGE has
88504a4de75SMichael Baum 		 * If the index is valid and the state is FREE, it means this AGE has
88604a4de75SMichael Baum 		 * inside the ring.
88704a4de75SMichael Baum 		 */
88804a4de75SMichael Baum 		return rte_flow_error_set(error, EINVAL,
88904a4de75SMichael Baum 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
89004a4de75SMichael Baum 					  "this AGE has already been released");
89104a4de75SMichael Baum 	default:
89204a4de75SMichael Baum 		MLX5_ASSERT(0);
89304a4de75SMichael Baum 		break;
89404a4de75SMichael Baum 	}
89504a4de75SMichael Baum 	return 0;
89604a4de75SMichael Baum }
89704a4de75SMichael Baum 
89804a4de75SMichael Baum /**
89904a4de75SMichael Baum  * Create AGE action parameter.
90004a4de75SMichael Baum  *
90104a4de75SMichael Baum  * @param[in] priv
90204a4de75SMichael Baum  *   Pointer to the port private data structure.
90304a4de75SMichael Baum  * @param[in] queue_id
90404a4de75SMichael Baum  *   Which HWS queue to be used.
90504a4de75SMichael Baum  * @param[in] shared
90604a4de75SMichael Baum  *   Whether it is an indirect AGE action.
90704a4de75SMichael Baum  * @param[in] flow_idx
90804a4de75SMichael Baum  *   Flow index from indexed pool.
90904a4de75SMichael Baum  *   For an indirect AGE action it has no effect.
91004a4de75SMichael Baum  * @param[in] age
91104a4de75SMichael Baum  *   Pointer to the aging action configuration.
91204a4de75SMichael Baum  * @param[out] error
91304a4de75SMichael Baum  *   Pointer to error structure.
91404a4de75SMichael Baum  *
91504a4de75SMichael Baum  * @return
91604a4de75SMichael Baum  *   Index to AGE action parameter on success, 0 otherwise.
91704a4de75SMichael Baum  */
91804a4de75SMichael Baum uint32_t
91904a4de75SMichael Baum mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
92004a4de75SMichael Baum 			   bool shared, const struct rte_flow_action_age *age,
92104a4de75SMichael Baum 			   uint32_t flow_idx, struct rte_flow_error *error)
92204a4de75SMichael Baum {
92304a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
92404a4de75SMichael Baum 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
92504a4de75SMichael Baum 	struct mlx5_hws_age_param *param;
92604a4de75SMichael Baum 	uint32_t age_idx;
92704a4de75SMichael Baum 
92804a4de75SMichael Baum 	param = mlx5_ipool_malloc(ipool, &age_idx);
92904a4de75SMichael Baum 	if (param == NULL) {
93004a4de75SMichael Baum 		rte_flow_error_set(error, ENOMEM,
93104a4de75SMichael Baum 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
93204a4de75SMichael Baum 				   "cannot allocate AGE parameter");
93304a4de75SMichael Baum 		return 0;
93404a4de75SMichael Baum 	}
935e12a0166STyler Retzlaff 	MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
936e12a0166STyler Retzlaff 				    rte_memory_order_relaxed) == HWS_AGE_FREE);
93704a4de75SMichael Baum 	if (shared) {
93804a4de75SMichael Baum 		param->nb_cnts = 0;
93904a4de75SMichael Baum 		param->accumulator_hits = 0;
94004a4de75SMichael Baum 		param->accumulator_cnt = 0;
94104a4de75SMichael Baum 		flow_idx = age_idx;
94204a4de75SMichael Baum 	} else {
94304a4de75SMichael Baum 		param->nb_cnts = 1;
94404a4de75SMichael Baum 	}
94504a4de75SMichael Baum 	param->context = age->context ? age->context :
94604a4de75SMichael Baum 					(void *)(uintptr_t)flow_idx;
94704a4de75SMichael Baum 	param->timeout = age->timeout;
94804a4de75SMichael Baum 	param->queue_id = queue_id;
94904a4de75SMichael Baum 	param->accumulator_last_hits = 0;
95004a4de75SMichael Baum 	param->own_cnt_index = 0;
95104a4de75SMichael Baum 	param->sec_since_last_hit = 0;
95204a4de75SMichael Baum 	param->state = HWS_AGE_CANDIDATE;
95304a4de75SMichael Baum 	return age_idx;
95404a4de75SMichael Baum }
95504a4de75SMichael Baum 
95604a4de75SMichael Baum /**
95704a4de75SMichael Baum  * Update indirect AGE action parameter.
95804a4de75SMichael Baum  *
95904a4de75SMichael Baum  * @param[in] priv
96004a4de75SMichael Baum  *   Pointer to the port private data structure.
96104a4de75SMichael Baum  * @param[in] idx
96204a4de75SMichael Baum  *   Index of AGE parameter.
96304a4de75SMichael Baum  * @param[in] update
96404a4de75SMichael Baum  *   Update value.
96504a4de75SMichael Baum  * @param[out] error
96604a4de75SMichael Baum  *   Pointer to error structure.
96704a4de75SMichael Baum  *
96804a4de75SMichael Baum  * @return
96904a4de75SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
97004a4de75SMichael Baum  */
97104a4de75SMichael Baum int
97204a4de75SMichael Baum mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
97304a4de75SMichael Baum 			   const void *update, struct rte_flow_error *error)
97404a4de75SMichael Baum {
97504a4de75SMichael Baum 	const struct rte_flow_update_age *update_ade = update;
97604a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
97704a4de75SMichael Baum 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
97804a4de75SMichael Baum 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
97904a4de75SMichael Baum 	bool sec_since_last_hit_reset = false;
98004a4de75SMichael Baum 	bool state_update = false;
98104a4de75SMichael Baum 
98204a4de75SMichael Baum 	if (param == NULL)
98304a4de75SMichael Baum 		return rte_flow_error_set(error, EINVAL,
98404a4de75SMichael Baum 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
98504a4de75SMichael Baum 					  "invalid AGE parameter index");
98604a4de75SMichael Baum 	if (update_ade->timeout_valid) {
987e12a0166STyler Retzlaff 		uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
98804a4de75SMichael Baum 							   update_ade->timeout,
989e12a0166STyler Retzlaff 							   rte_memory_order_relaxed);
99004a4de75SMichael Baum 
99104a4de75SMichael Baum 		if (old_timeout == 0)
99204a4de75SMichael Baum 			sec_since_last_hit_reset = true;
99304a4de75SMichael Baum 		else if (old_timeout < update_ade->timeout ||
99404a4de75SMichael Baum 			 update_ade->timeout == 0)
99504a4de75SMichael Baum 			/*
99604a4de75SMichael Baum 			 * When the timeout is increased, aged-out flows might
99704a4de75SMichael Baum 			 * become active again and the state should be updated
99804a4de75SMichael Baum 			 * accordingly. When the new timeout is 0, the state is
99904a4de75SMichael Baum 			 * updated so that the flow is no longer reported as aged-out.
100004a4de75SMichael Baum 			 */
100104a4de75SMichael Baum 			state_update = true;
100204a4de75SMichael Baum 	}
100304a4de75SMichael Baum 	if (update_ade->touch) {
100404a4de75SMichael Baum 		sec_since_last_hit_reset = true;
100504a4de75SMichael Baum 		state_update = true;
100604a4de75SMichael Baum 	}
100704a4de75SMichael Baum 	if (sec_since_last_hit_reset)
1008e12a0166STyler Retzlaff 		rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
1009e12a0166STyler Retzlaff 				 rte_memory_order_relaxed);
101004a4de75SMichael Baum 	if (state_update) {
101104a4de75SMichael Baum 		uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
101204a4de75SMichael Baum 
101304a4de75SMichael Baum 		/*
101404a4de75SMichael Baum 		 * Change states of aged-out flows to active:
101504a4de75SMichael Baum 		 *  - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
101604a4de75SMichael Baum 		 *  - AGED_OUT_REPORTED -> CANDIDATE
101704a4de75SMichael Baum 		 */
1018e12a0166STyler Retzlaff 		if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
101904a4de75SMichael Baum 						 HWS_AGE_CANDIDATE_INSIDE_RING,
1020e12a0166STyler Retzlaff 						 rte_memory_order_relaxed,
1021e12a0166STyler Retzlaff 						 rte_memory_order_relaxed) &&
102204a4de75SMichael Baum 		    expected == HWS_AGE_AGED_OUT_REPORTED)
1023e12a0166STyler Retzlaff 			rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
1024e12a0166STyler Retzlaff 					 rte_memory_order_relaxed);
102504a4de75SMichael Baum 	}
102604a4de75SMichael Baum 	return 0;
102704a4de75SMichael Baum }
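
/*
 * Illustrative sketch, not part of the driver: a hypothetical wrapper
 * showing how mlx5_hws_age_action_update() expects to be fed. The
 * wrapper name and the way the AGE index is obtained are assumptions;
 * in the real code path the call comes from the rte_flow
 * indirect-action update handlers with an index decoded from the
 * action handle.
 */
static __rte_unused int
example_hws_age_update(struct mlx5_priv *priv, uint32_t age_idx,
		       uint32_t new_timeout_sec, struct rte_flow_error *error)
{
	struct rte_flow_update_age conf = {
		.timeout_valid = 1,		/* apply the timeout below */
		.timeout = new_timeout_sec,	/* seconds, 24-bit field */
		.touch = 1,			/* also reset sec_since_last_hit */
	};

	return mlx5_hws_age_action_update(priv, age_idx, &conf, error);
}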
102804a4de75SMichael Baum 
102904a4de75SMichael Baum /**
103004a4de75SMichael Baum  * Get the AGE context if the aged-out index is still valid.
103104a4de75SMichael Baum  *
103204a4de75SMichael Baum  * @param priv
103304a4de75SMichael Baum  *   Pointer to the port private data structure.
103404a4de75SMichael Baum  * @param idx
103504a4de75SMichael Baum  *   Index of AGE parameter.
103604a4de75SMichael Baum  *
103704a4de75SMichael Baum  * @return
103804a4de75SMichael Baum  *   AGE context if the index is still aged-out, NULL otherwise.
103904a4de75SMichael Baum  */
104004a4de75SMichael Baum void *
104104a4de75SMichael Baum mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx)
104204a4de75SMichael Baum {
104304a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
104404a4de75SMichael Baum 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
104504a4de75SMichael Baum 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
104604a4de75SMichael Baum 	uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
104704a4de75SMichael Baum 
104804a4de75SMichael Baum 	MLX5_ASSERT(param != NULL);
1049e12a0166STyler Retzlaff 	if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
1050e12a0166STyler Retzlaff 					HWS_AGE_AGED_OUT_REPORTED,
1051e12a0166STyler Retzlaff 					rte_memory_order_relaxed, rte_memory_order_relaxed))
105204a4de75SMichael Baum 		return param->context;
105304a4de75SMichael Baum 	switch (expected) {
105404a4de75SMichael Baum 	case HWS_AGE_FREE:
105504a4de75SMichael Baum 		/*
105604a4de75SMichael Baum 		 * This AGE could not be destroyed while its index was inside
105704a4de75SMichael Baum 		 * the ring. Its state has been updated to FREE, so it is
105804a4de75SMichael Baum 		 * actually destroyed here.
105904a4de75SMichael Baum 		 */
106004a4de75SMichael Baum 		mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
106104a4de75SMichael Baum 		break;
106204a4de75SMichael Baum 	case HWS_AGE_CANDIDATE_INSIDE_RING:
1063e12a0166STyler Retzlaff 		rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
1064e12a0166STyler Retzlaff 				 rte_memory_order_relaxed);
106504a4de75SMichael Baum 		break;
106604a4de75SMichael Baum 	case HWS_AGE_CANDIDATE:
106704a4de75SMichael Baum 		/*
106804a4de75SMichael Baum 		 * Only the BG thread pushes to the ring and it never pushes
106904a4de75SMichael Baum 		 * this state. An AGE that becomes a candidate while inside
107004a4de75SMichael Baum 		 * the ring gets the special state HWS_AGE_CANDIDATE_INSIDE_RING.
107104a4de75SMichael Baum 		 * Fall-through.
107204a4de75SMichael Baum 		 */
107304a4de75SMichael Baum 	case HWS_AGE_AGED_OUT_REPORTED:
107404a4de75SMichael Baum 		/*
107504a4de75SMichael Baum 		 * Only this (query) thread may write this state, and only
107604a4de75SMichael Baum 		 * after it has taken the index out of the ring.
107704a4de75SMichael Baum 		 * Fall-through.
107804a4de75SMichael Baum 		 */
107904a4de75SMichael Baum 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
108004a4de75SMichael Baum 		/*
108104a4de75SMichael Baum 		 * In this case the compare-exchange above returns true and
108204a4de75SMichael Baum 		 * the function returns the context immediately.
108304a4de75SMichael Baum 		 * Fall-through.
108404a4de75SMichael Baum 		 */
108504a4de75SMichael Baum 	default:
108604a4de75SMichael Baum 		MLX5_ASSERT(0);
108704a4de75SMichael Baum 		break;
108804a4de75SMichael Baum 	}
108904a4de75SMichael Baum 	return NULL;
109004a4de75SMichael Baum }
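
/*
 * Illustrative sketch, not part of the driver: how a consumer could
 * drain one aged-out ring and translate indexes into user contexts.
 * The function name and the contexts/nb_contexts parameters are
 * assumptions; the real consumer is the aged-flows query path, which
 * additionally handles the "count only" case and per-queue ring
 * selection in strict-queue mode.
 */
static __rte_unused int
example_drain_aged_ring(struct mlx5_priv *priv, struct rte_ring *r,
			void **contexts, uint32_t nb_contexts)
{
	uint32_t nb = 0;
	uint32_t age_idx;

	while (nb < nb_contexts &&
	       rte_ring_dequeue_elem(r, &age_idx, sizeof(age_idx)) == 0) {
		void *ctx = mlx5_hws_age_context_get(priv, age_idx);

		/* NULL means the AGE was touched or freed meanwhile. */
		if (ctx != NULL)
			contexts[nb++] = ctx;
	}
	return (int)nb;
}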
109104a4de75SMichael Baum 
109204a4de75SMichael Baum #ifdef RTE_ARCH_64
109304a4de75SMichael Baum #define MLX5_HWS_AGED_OUT_RING_SIZE_MAX UINT32_MAX
109404a4de75SMichael Baum #else
109504a4de75SMichael Baum #define MLX5_HWS_AGED_OUT_RING_SIZE_MAX RTE_BIT32(8)
109604a4de75SMichael Baum #endif
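
/*
 * Note: RTE_BIT32(8) is 256, the cap mentioned as "limited to 256" in
 * the sizing helpers below. The tighter cap on 32-bit builds presumably
 * bounds the ring memory footprint there (rationale not stated in the
 * original code).
 */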
109704a4de75SMichael Baum 
109804a4de75SMichael Baum /**
109904a4de75SMichael Baum  * Get the size of aged out ring list for each queue.
110004a4de75SMichael Baum  *
110104a4de75SMichael Baum  * The size is one percent of nb_counters divided by nb_queues.
110204a4de75SMichael Baum  * The ring size must be a power of 2, so it is aligned up accordingly.
110304a4de75SMichael Baum  * On 32-bit systems, the size is limited to 256.
110404a4de75SMichael Baum  *
110504a4de75SMichael Baum  * This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is on.
110604a4de75SMichael Baum  *
110704a4de75SMichael Baum  * @param nb_counters
110804a4de75SMichael Baum  *   Final number of allocated counters in the pool.
110904a4de75SMichael Baum  * @param nb_queues
111004a4de75SMichael Baum  *   Number of HWS queues in this port.
111104a4de75SMichael Baum  *
111204a4de75SMichael Baum  * @return
111304a4de75SMichael Baum  *   Size of aged out ring per queue.
111404a4de75SMichael Baum  */
111504a4de75SMichael Baum static __rte_always_inline uint32_t
111604a4de75SMichael Baum mlx5_hws_aged_out_q_ring_size_get(uint32_t nb_counters, uint32_t nb_queues)
111704a4de75SMichael Baum {
111804a4de75SMichael Baum 	uint32_t size = rte_align32pow2((nb_counters / 100) / nb_queues);
111904a4de75SMichael Baum 	uint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;
112004a4de75SMichael Baum 
112104a4de75SMichael Baum 	return RTE_MIN(size, max_size);
112204a4de75SMichael Baum }
112304a4de75SMichael Baum 
112404a4de75SMichael Baum /**
112504a4de75SMichael Baum  * Get the size of the aged out ring list.
112604a4de75SMichael Baum  *
112704a4de75SMichael Baum  * The size is one percent of nb_counters.
112804a4de75SMichael Baum  * The ring size must be a power of 2, so it is aligned up accordingly.
112904a4de75SMichael Baum  * On 32-bit systems, the size is limited to 256.
113004a4de75SMichael Baum  *
113104a4de75SMichael Baum  * This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is off.
113204a4de75SMichael Baum  *
113304a4de75SMichael Baum  * @param nb_counters
113404a4de75SMichael Baum  *   Final number of allocated counters in the pool.
113504a4de75SMichael Baum  *
113604a4de75SMichael Baum  * @return
113704a4de75SMichael Baum  *   Size of the aged out ring list.
113804a4de75SMichael Baum  */
113904a4de75SMichael Baum static __rte_always_inline uint32_t
114004a4de75SMichael Baum mlx5_hws_aged_out_ring_size_get(uint32_t nb_counters)
114104a4de75SMichael Baum {
114204a4de75SMichael Baum 	uint32_t size = rte_align32pow2(nb_counters / 100);
114304a4de75SMichael Baum 	uint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;
114404a4de75SMichael Baum 
114504a4de75SMichael Baum 	return RTE_MIN(size, max_size);
114604a4de75SMichael Baum }
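
/*
 * Worked example with illustrative numbers (assumption only): with
 * nb_counters = 400000 and nb_queues = 4 in strict-queue mode,
 * (400000 / 100) / 4 = 1000 and rte_align32pow2(1000) = 1024 entries
 * per queue ring. Without strict queue, 400000 / 100 = 4000 aligns up
 * to a single ring of 4096 entries. On 32-bit builds both results are
 * capped at 256.
 */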
114704a4de75SMichael Baum 
114804a4de75SMichael Baum /**
114904a4de75SMichael Baum  * Initialize the shared aging list information per port.
115004a4de75SMichael Baum  *
115104a4de75SMichael Baum  * @param dev
115204a4de75SMichael Baum  *   Pointer to the rte_eth_dev structure.
115304a4de75SMichael Baum  * @param nb_queues
115404a4de75SMichael Baum  *   Number of HWS queues.
115504a4de75SMichael Baum  * @param strict_queue
115604a4de75SMichael Baum  *   Indicator whether strict_queue mode is enabled.
115704a4de75SMichael Baum  * @param ring_size
115804a4de75SMichael Baum  *   Size of aged-out ring for creation.
115904a4de75SMichael Baum  *
116004a4de75SMichael Baum  * @return
116104a4de75SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
116204a4de75SMichael Baum  */
116304a4de75SMichael Baum static int
116404a4de75SMichael Baum mlx5_hws_age_info_init(struct rte_eth_dev *dev, uint16_t nb_queues,
116504a4de75SMichael Baum 		       bool strict_queue, uint32_t ring_size)
116604a4de75SMichael Baum {
116704a4de75SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
116804a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
116904a4de75SMichael Baum 	uint32_t flags = RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ;
117004a4de75SMichael Baum 	char mz_name[RTE_MEMZONE_NAMESIZE];
117104a4de75SMichael Baum 	struct rte_ring *r = NULL;
117204a4de75SMichael Baum 	uint32_t qidx;
117304a4de75SMichael Baum 
117404a4de75SMichael Baum 	age_info->flags = 0;
117504a4de75SMichael Baum 	if (strict_queue) {
117604a4de75SMichael Baum 		size_t size = sizeof(*age_info->hw_q_age) +
117704a4de75SMichael Baum 			      sizeof(struct rte_ring *) * nb_queues;
117804a4de75SMichael Baum 
117904a4de75SMichael Baum 		age_info->hw_q_age = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
118004a4de75SMichael Baum 						 size, 0, SOCKET_ID_ANY);
118104a4de75SMichael Baum 		if (age_info->hw_q_age == NULL)
118204a4de75SMichael Baum 			return -ENOMEM;
118304a4de75SMichael Baum 		for (qidx = 0; qidx < nb_queues; ++qidx) {
118404a4de75SMichael Baum 			snprintf(mz_name, sizeof(mz_name),
118504a4de75SMichael Baum 				 "port_%u_queue_%u_aged_out_ring",
118604a4de75SMichael Baum 				 dev->data->port_id, qidx);
118704a4de75SMichael Baum 			r = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY,
118804a4de75SMichael Baum 					    flags);
118904a4de75SMichael Baum 			if (r == NULL) {
119004a4de75SMichael Baum 				DRV_LOG(ERR, "\"%s\" creation failed: %s",
119104a4de75SMichael Baum 					mz_name, rte_strerror(rte_errno));
119204a4de75SMichael Baum 				goto error;
119304a4de75SMichael Baum 			}
119404a4de75SMichael Baum 			age_info->hw_q_age->aged_lists[qidx] = r;
119504a4de75SMichael Baum 			DRV_LOG(DEBUG,
119604a4de75SMichael Baum 				"\"%s\" is successfully created (size=%u).",
119704a4de75SMichael Baum 				mz_name, ring_size);
119804a4de75SMichael Baum 		}
119904a4de75SMichael Baum 		age_info->hw_q_age->nb_rings = nb_queues;
120004a4de75SMichael Baum 	} else {
120104a4de75SMichael Baum 		snprintf(mz_name, sizeof(mz_name), "port_%u_aged_out_ring",
120204a4de75SMichael Baum 			 dev->data->port_id);
120304a4de75SMichael Baum 		r = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY, flags);
120404a4de75SMichael Baum 		if (r == NULL) {
120504a4de75SMichael Baum 			DRV_LOG(ERR, "\"%s\" creation failed: %s", mz_name,
120604a4de75SMichael Baum 				rte_strerror(rte_errno));
120704a4de75SMichael Baum 			return -rte_errno;
120804a4de75SMichael Baum 		}
120904a4de75SMichael Baum 		age_info->hw_age.aged_list = r;
121004a4de75SMichael Baum 		DRV_LOG(DEBUG, "\"%s\" is successfully created (size=%u).",
121104a4de75SMichael Baum 			mz_name, ring_size);
121204a4de75SMichael Baum 		/* In non "strict_queue" mode, initialize the event. */
121304a4de75SMichael Baum 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
121404a4de75SMichael Baum 	}
121504a4de75SMichael Baum 	return 0;
121604a4de75SMichael Baum error:
121704a4de75SMichael Baum 	MLX5_ASSERT(strict_queue);
121804a4de75SMichael Baum 	while (qidx--)
121904a4de75SMichael Baum 		rte_ring_free(age_info->hw_q_age->aged_lists[qidx]);
122004a4de75SMichael Baum 	mlx5_free(age_info->hw_q_age);
122104a4de75SMichael Baum 	return -1;
122204a4de75SMichael Baum }
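
/*
 * Minimal sketch, not part of the driver: what RING_F_EXACT_SZ buys the
 * rings created above. With the flag set, the usable capacity equals
 * the requested count even when it is not a power of two (the internal
 * size is rounded up). SP_ENQ/SC_DEQ declare single-producer /
 * single-consumer usage, matching one enqueuing aging-service thread
 * and one dequeuing query thread (assumed from the flags chosen here).
 * The ring name below is a hypothetical example.
 */
static __rte_unused void
example_exact_size_ring(void)
{
	struct rte_ring *r = rte_ring_create("example_aged_ring", 1000,
					     SOCKET_ID_ANY,
					     RING_F_SP_ENQ | RING_F_SC_DEQ |
					     RING_F_EXACT_SZ);

	if (r == NULL)
		return;
	/* Usable capacity is the requested 1000 entries. */
	MLX5_ASSERT(rte_ring_get_capacity(r) == 1000);
	rte_ring_free(r);
}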
122304a4de75SMichael Baum 
122404a4de75SMichael Baum /**
122504a4de75SMichael Baum  * Cleanup aged-out ring before destroying.
122604a4de75SMichael Baum  *
122704a4de75SMichael Baum  * @param priv
122804a4de75SMichael Baum  *   Pointer to port private object.
122904a4de75SMichael Baum  * @param r
123004a4de75SMichael Baum  *   Pointer to aged-out ring object.
123104a4de75SMichael Baum  */
123204a4de75SMichael Baum static void
123304a4de75SMichael Baum mlx5_hws_aged_out_ring_cleanup(struct mlx5_priv *priv, struct rte_ring *r)
123404a4de75SMichael Baum {
123504a4de75SMichael Baum 	int ring_size = rte_ring_count(r);
123604a4de75SMichael Baum 
123704a4de75SMichael Baum 	while (ring_size > 0) {
123804a4de75SMichael Baum 		uint32_t age_idx = 0;
123904a4de75SMichael Baum 
124004a4de75SMichael Baum 		if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
124104a4de75SMichael Baum 			break;
124204a4de75SMichael Baum 		/* Resolve the index so that params in FREE state are destroyed. */
124304a4de75SMichael Baum 		mlx5_hws_age_context_get(priv, age_idx);
124404a4de75SMichael Baum 		ring_size--;
124504a4de75SMichael Baum 	}
124604a4de75SMichael Baum 	rte_ring_free(r);
124704a4de75SMichael Baum }
124804a4de75SMichael Baum 
124904a4de75SMichael Baum /**
125004a4de75SMichael Baum  * Destroy the shared aging list information per port.
125104a4de75SMichael Baum  *
125204a4de75SMichael Baum  * @param priv
125304a4de75SMichael Baum  *   Pointer to port private object.
125404a4de75SMichael Baum  */
125504a4de75SMichael Baum static void
125604a4de75SMichael Baum mlx5_hws_age_info_destroy(struct mlx5_priv *priv)
125704a4de75SMichael Baum {
125804a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
125904a4de75SMichael Baum 	uint16_t nb_queues = age_info->hw_q_age->nb_rings;
126004a4de75SMichael Baum 	struct rte_ring *r;
126104a4de75SMichael Baum 
126204a4de75SMichael Baum 	if (priv->hws_strict_queue) {
126304a4de75SMichael Baum 		uint32_t qidx;
126404a4de75SMichael Baum 
126504a4de75SMichael Baum 		for (qidx = 0; qidx < nb_queues; ++qidx) {
126604a4de75SMichael Baum 			r = age_info->hw_q_age->aged_lists[qidx];
126704a4de75SMichael Baum 			mlx5_hws_aged_out_ring_cleanup(priv, r);
126804a4de75SMichael Baum 		}
126904a4de75SMichael Baum 		mlx5_free(age_info->hw_q_age);
127004a4de75SMichael Baum 	} else {
127104a4de75SMichael Baum 		r = age_info->hw_age.aged_list;
127204a4de75SMichael Baum 		mlx5_hws_aged_out_ring_cleanup(priv, r);
127304a4de75SMichael Baum 	}
127404a4de75SMichael Baum }
127504a4de75SMichael Baum 
127604a4de75SMichael Baum /**
127704a4de75SMichael Baum  * Initialize the aging mechanism per port.
127804a4de75SMichael Baum  *
127904a4de75SMichael Baum  * @param dev
128004a4de75SMichael Baum  *   Pointer to the rte_eth_dev structure.
128104a4de75SMichael Baum  * @param attr
128204a4de75SMichael Baum  *   Port configuration attributes.
128304a4de75SMichael Baum  * @param nb_queues
128404a4de75SMichael Baum  *   Number of HWS queues.
128504a4de75SMichael Baum  *
128604a4de75SMichael Baum  * @return
128704a4de75SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
128804a4de75SMichael Baum  */
128904a4de75SMichael Baum int
129004a4de75SMichael Baum mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
1291e1c83d29SMaayan Kashani 		       uint32_t nb_aging_objects,
1292e1c83d29SMaayan Kashani 		       uint16_t nb_queues,
1293e1c83d29SMaayan Kashani 		       bool strict_queue)
129404a4de75SMichael Baum {
129504a4de75SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
129604a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
129704a4de75SMichael Baum 	struct mlx5_indexed_pool_config cfg = {
129804a4de75SMichael Baum 		.size =
129904a4de75SMichael Baum 		      RTE_CACHE_LINE_ROUNDUP(sizeof(struct mlx5_hws_age_param)),
130004a4de75SMichael Baum 		.trunk_size = 1 << 12,
130104a4de75SMichael Baum 		.per_core_cache = 1 << 13,
130204a4de75SMichael Baum 		.need_lock = 1,
130304a4de75SMichael Baum 		.release_mem_en = !!priv->sh->config.reclaim_mode,
130404a4de75SMichael Baum 		.malloc = mlx5_malloc,
130504a4de75SMichael Baum 		.free = mlx5_free,
130604a4de75SMichael Baum 		.type = "mlx5_hws_age_pool",
130704a4de75SMichael Baum 	};
130804a4de75SMichael Baum 	uint32_t nb_alloc_cnts;
130904a4de75SMichael Baum 	uint32_t rsize;
131004a4de75SMichael Baum 	uint32_t nb_ages_updated;
131104a4de75SMichael Baum 	int ret;
131204a4de75SMichael Baum 
131304a4de75SMichael Baum 	MLX5_ASSERT(priv->hws_cpool);
131404a4de75SMichael Baum 	nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);
131504a4de75SMichael Baum 	if (strict_queue) {
131604a4de75SMichael Baum 		rsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,
131704a4de75SMichael Baum 							  nb_queues);
1318e1c83d29SMaayan Kashani 		nb_ages_updated = rsize * nb_queues + nb_aging_objects;
131904a4de75SMichael Baum 	} else {
132004a4de75SMichael Baum 		rsize = mlx5_hws_aged_out_ring_size_get(nb_alloc_cnts);
1321e1c83d29SMaayan Kashani 		nb_ages_updated = rsize + nb_aging_objects;
132204a4de75SMichael Baum 	}
132304a4de75SMichael Baum 	ret = mlx5_hws_age_info_init(dev, nb_queues, strict_queue, rsize);
132404a4de75SMichael Baum 	if (ret < 0)
132504a4de75SMichael Baum 		return ret;
132604a4de75SMichael Baum 	cfg.max_idx = rte_align32pow2(nb_ages_updated);
132704a4de75SMichael Baum 	if (cfg.max_idx <= cfg.trunk_size) {
132804a4de75SMichael Baum 		cfg.per_core_cache = 0;
132904a4de75SMichael Baum 		cfg.trunk_size = cfg.max_idx;
133004a4de75SMichael Baum 	} else if (cfg.max_idx <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
133104a4de75SMichael Baum 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
133204a4de75SMichael Baum 	}
133304a4de75SMichael Baum 	age_info->ages_ipool = mlx5_ipool_create(&cfg);
133404a4de75SMichael Baum 	if (age_info->ages_ipool == NULL) {
133504a4de75SMichael Baum 		mlx5_hws_age_info_destroy(priv);
133604a4de75SMichael Baum 		rte_errno = ENOMEM;
133704a4de75SMichael Baum 		return -rte_errno;
133804a4de75SMichael Baum 	}
133904a4de75SMichael Baum 	priv->hws_age_req = 1;
134004a4de75SMichael Baum 	return 0;
134104a4de75SMichael Baum }
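
/*
 * Worked example for the sizing above (illustrative numbers only): with
 * 400000 allocated counters, 4 queues, strict_queue on and
 * nb_aging_objects = 10000, rsize is 1024 per queue, so
 * nb_ages_updated = 1024 * 4 + 10000 = 14096 and cfg.max_idx becomes
 * rte_align32pow2(14096) = 16384. That exceeds the 4096 trunk size, so
 * the first branch (disabling the per-core cache) is not taken; whether
 * the cache is reduced to MLX5_HW_IPOOL_CACHE_MIN depends on
 * MLX5_HW_IPOOL_SIZE_THRESHOLD, defined elsewhere.
 */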
134204a4de75SMichael Baum 
134304a4de75SMichael Baum /**
134404a4de75SMichael Baum  * Cleanup all aging resources per port.
134504a4de75SMichael Baum  *
134604a4de75SMichael Baum  * @param priv
134704a4de75SMichael Baum  *   Pointer to port private object.
134804a4de75SMichael Baum  */
134904a4de75SMichael Baum void
135004a4de75SMichael Baum mlx5_hws_age_pool_destroy(struct mlx5_priv *priv)
135104a4de75SMichael Baum {
135204a4de75SMichael Baum 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
135304a4de75SMichael Baum 
13546ac2104aSSuanming Mou 	rte_spinlock_lock(&priv->sh->cpool_lock);
135504a4de75SMichael Baum 	MLX5_ASSERT(priv->hws_age_req);
135604a4de75SMichael Baum 	mlx5_hws_age_info_destroy(priv);
135704a4de75SMichael Baum 	mlx5_ipool_destroy(age_info->ages_ipool);
135804a4de75SMichael Baum 	age_info->ages_ipool = NULL;
135904a4de75SMichael Baum 	priv->hws_age_req = 0;
13606ac2104aSSuanming Mou 	rte_spinlock_unlock(&priv->sh->cpool_lock);
136104a4de75SMichael Baum }
136204a4de75SMichael Baum 
13634d368e1dSXiaoyu Min #endif
1364