xref: /dpdk/drivers/net/mlx5/mlx5_flow_quota.c (revision e12a0166c80f65e35408f4715b2f3a60763c3741)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Nvidia Inc. All rights reserved.
 */

#include <stddef.h>
#include <rte_eal_paging.h>

#include "mlx5_utils.h"
#include "mlx5_flow.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

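/*
 * Quota command WQE builder.
 * Fills the command-specific parts of a pre-formatted ACCESS_ASO WQE.
 * Parameters: the WQE under construction, the quota context, the quota
 * object index, the work queue index and a per-command argument.
 */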
typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
				struct mlx5_quota_ctx *, uint32_t, uint32_t,
				void *);

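/*
 * A flow meter ASO object holds 2 meters (MTR0 and MTR1) and each quota
 * object maps to one of them. The 64-bit byte-wise WQE data mask
 * restricts an ACCESS_ASO write to the half of the object that belongs
 * to the addressed quota.
 */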
#define MLX5_ASO_MTR1_INIT_MASK 0xffffffffULL
#define MLX5_ASO_MTR0_INIT_MASK ((MLX5_ASO_MTR1_INIT_MASK) << 32)

static __rte_always_inline bool
is_aso_mtr1_obj(uint32_t qix)
{
	return (qix & 1) != 0;
}

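/* The last device queue is reserved for synchronous quota operations. */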
static __rte_always_inline bool
is_quota_sync_queue(const struct mlx5_priv *priv, uint32_t queue)
{
	return queue >= priv->nb_queue - 1;
}

static __rte_always_inline uint32_t
quota_sync_queue(const struct mlx5_priv *priv)
{
	return priv->nb_queue - 1;
}

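/*
 * An ACCESS_ASO READ WQE fetches both meters of an ASO object, so the
 * per-queue read buffer keeps 2 dsegs for each SQ descriptor.
 * Return the dseg location that matches the quota object parity.
 */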
static __rte_always_inline uint32_t
mlx5_quota_wqe_read_offset(uint32_t qix, uint32_t sq_index)
{
	return 2 * sq_index + (qix & 1);
}

static int32_t
mlx5_quota_fetch_tokens(const struct mlx5_aso_mtr_dseg *rd_buf)
{
	int c_tok = (int)rte_be_to_cpu_32(rd_buf->c_tokens);
	int e_tok = (int)rte_be_to_cpu_32(rd_buf->e_tokens);
	int result;

	DRV_LOG(DEBUG, "c_tokens %d e_tokens %d", c_tok, e_tok);
	/* Query after SET ignores negative E tokens */
	if (c_tok >= 0 && e_tok < 0)
		result = c_tok;
	/**
	 * Once the number of tokens in a Meter bucket reaches zero or
	 * above, Meter hardware uses that bucket and can drive its token
	 * count to a negative value.
	 * Quota discards negative C tokens in the query report.
	 * That is a known hardware limitation.
	 * The row marked with (*) shows the rule in effect:
	 *
	 *      C     E   Result
	 *     250   250   500
	 *      50   250   300
	 *    -150   250   100
	 *    -150    50    50 (*)
	 *    -150  -150  -300
	 *
	 */
	else if (c_tok < 0 && e_tok >= 0 && (c_tok + e_tok) < 0)
		result = e_tok;
	else
		result = c_tok + e_tok;

	return result;
}

static void
mlx5_quota_query_update_async_cmpl(struct mlx5_hw_q_job *job)
{
	struct rte_flow_query_quota *query = job->query.user;

	query->quota = mlx5_quota_fetch_tokens(job->query.hw);
}

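/*
 * Completion handler for asynchronous quota commands.
 * Marks the quota object READY; for QUERY and UPDATE_QUERY jobs it also
 * converts the raw dseg that hardware wrote into the user token count.
 */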
void
mlx5_quota_async_completion(struct rte_eth_dev *dev, uint32_t queue,
			    struct mlx5_hw_q_job *job)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t qix = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
	struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, qix);

	RTE_SET_USED(queue);
	qobj->state = MLX5_QUOTA_STATE_READY;
	switch (job->type) {
	case MLX5_HW_Q_JOB_TYPE_CREATE:
		break;
	case MLX5_HW_Q_JOB_TYPE_QUERY:
	case MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY:
		mlx5_quota_query_update_async_cmpl(job);
		break;
	default:
		break;
	}
}

static __rte_always_inline void
mlx5_quota_wqe_set_aso_read(volatile struct mlx5_aso_wqe *restrict wqe,
			    struct mlx5_quota_ctx *qctx, uint32_t queue)
{
	struct mlx5_aso_sq *sq = qctx->sq + queue;
	uint32_t sq_mask = (1 << sq->log_desc_n) - 1;
	uint32_t sq_head = sq->head & sq_mask;
	uint64_t rd_addr = (uint64_t)(qctx->read_buf[queue] + 2 * sq_head);

	wqe->aso_cseg.lkey = rte_cpu_to_be_32(qctx->mr.lkey);
	wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(rd_addr >> 32));
	wqe->aso_cseg.va_l_r = rte_cpu_to_be_32(((uint32_t)rd_addr) |
						MLX5_ASO_CSEG_READ_ENABLE);
}

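/*
 * Byte masks for quota UPDATE commands:
 * SET rewrites both the C and E token counters,
 * ADD modifies the E token counter only.
 */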
#define MLX5_ASO_MTR1_ADD_MASK 0x00000F00ULL
#define MLX5_ASO_MTR1_SET_MASK 0x000F0F00ULL
#define MLX5_ASO_MTR0_ADD_MASK ((MLX5_ASO_MTR1_ADD_MASK) << 32)
#define MLX5_ASO_MTR0_SET_MASK ((MLX5_ASO_MTR1_SET_MASK) << 32)

static __rte_always_inline void
mlx5_quota_wqe_set_mtr_tokens(volatile struct mlx5_aso_wqe *restrict wqe,
			      uint32_t qix, void *arg)
{
	volatile struct mlx5_aso_mtr_dseg *mtr_dseg;
	const struct rte_flow_update_quota *conf = arg;
	bool set_op = (conf->op == RTE_FLOW_UPDATE_QUOTA_SET);

	if (is_aso_mtr1_obj(qix)) {
		wqe->aso_cseg.data_mask = set_op ?
					  RTE_BE64(MLX5_ASO_MTR1_SET_MASK) :
					  RTE_BE64(MLX5_ASO_MTR1_ADD_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs + 1;
	} else {
		wqe->aso_cseg.data_mask = set_op ?
					  RTE_BE64(MLX5_ASO_MTR0_SET_MASK) :
					  RTE_BE64(MLX5_ASO_MTR0_ADD_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs;
	}
	if (set_op) {
		/* prevent using E tokens when C tokens exhausted */
		mtr_dseg->e_tokens = -1;
		mtr_dseg->c_tokens = rte_cpu_to_be_32(conf->quota);
	} else {
		mtr_dseg->e_tokens = rte_cpu_to_be_32(conf->quota);
	}
}

static __rte_always_inline void
mlx5_quota_wqe_query(volatile struct mlx5_aso_wqe *restrict wqe,
		     struct mlx5_quota_ctx *qctx, __rte_unused uint32_t qix,
		     uint32_t queue, __rte_unused void *arg)
{
	mlx5_quota_wqe_set_aso_read(wqe, qctx, queue);
	wqe->aso_cseg.data_mask = 0ull; /* clear MTR ASO data modification */
}

static __rte_always_inline void
mlx5_quota_wqe_update(volatile struct mlx5_aso_wqe *restrict wqe,
		      __rte_unused struct mlx5_quota_ctx *qctx, uint32_t qix,
		      __rte_unused uint32_t queue, void *arg)
{
	mlx5_quota_wqe_set_mtr_tokens(wqe, qix, arg);
	wqe->aso_cseg.va_l_r = 0; /* clear READ flag */
}

static __rte_always_inline void
mlx5_quota_wqe_query_update(volatile struct mlx5_aso_wqe *restrict wqe,
			    struct mlx5_quota_ctx *qctx, uint32_t qix,
			    uint32_t queue, void *arg)
{
	mlx5_quota_wqe_set_aso_read(wqe, qctx, queue);
	mlx5_quota_wqe_set_mtr_tokens(wqe, qix, arg);
}

static __rte_always_inline void
mlx5_quota_set_init_wqe(volatile struct mlx5_aso_wqe *restrict wqe,
			struct mlx5_quota_ctx *qctx, uint32_t qix,
			__rte_unused uint32_t queue, void *arg)
{
	volatile struct mlx5_aso_mtr_dseg *mtr_dseg;
	const struct rte_flow_action_quota *conf = arg;
	const struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, qix + 1);

	if (is_aso_mtr1_obj(qix)) {
		wqe->aso_cseg.data_mask =
			rte_cpu_to_be_64(MLX5_ASO_MTR1_INIT_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs + 1;
	} else {
		wqe->aso_cseg.data_mask =
			rte_cpu_to_be_64(MLX5_ASO_MTR0_INIT_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs;
	}
	mtr_dseg->e_tokens = -1;
	mtr_dseg->c_tokens = rte_cpu_to_be_32(conf->quota);
	mtr_dseg->v_bo_sc_bbog_mm |= rte_cpu_to_be_32
		(qobj->mode << ASO_DSEG_MTR_MODE);
}

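/* Move quota objects of the n completed descriptors from WAIT to READY. */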
static __rte_always_inline void
mlx5_quota_cmd_completed_status(struct mlx5_aso_sq *sq, uint16_t n)
{
	uint16_t i, mask = (1 << sq->log_desc_n) - 1;

	for (i = 0; i < n; i++) {
		uint8_t state = MLX5_QUOTA_STATE_WAIT;
		struct mlx5_quota *quota_obj =
			sq->elts[(sq->tail + i) & mask].quota_obj;

		rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
					    MLX5_QUOTA_STATE_READY,
					    rte_memory_order_relaxed, rte_memory_order_relaxed);
	}
}

static void
mlx5_quota_cmd_completion_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	uint16_t max;
	uint16_t n = 0;
	int ret;

	MLX5_ASSERT(rte_spinlock_is_locked(&sq->sqsl));
	max = (uint16_t)(sq->head - sq->tail);
	if (unlikely(!max))
		return;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (ret != MLX5_CQE_STATUS_SW_OWN) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			n++;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(n)) {
		mlx5_quota_cmd_completed_status(sq, n);
		sq->tail += n;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
}

static int
mlx5_quota_cmd_wait_cmpl(struct mlx5_aso_sq *sq, struct mlx5_quota *quota_obj)
{
	uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;

	do {
		rte_spinlock_lock(&sq->sqsl);
		mlx5_quota_cmd_completion_handle(sq);
		rte_spinlock_unlock(&sq->sqsl);
		if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
		    MLX5_QUOTA_STATE_READY)
			return 0;
	} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	DRV_LOG(ERR, "QUOTA: failed to poll command CQ");
	return -1;
}

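/*
 * Build and post one quota ACCESS_ASO WQE.
 * On the sync queue the SQ is protected by a spinlock and the call
 * blocks until the command completes; on async queues the call returns
 * immediately and completion is reported through the job object.
 */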
static int
mlx5_quota_cmd_wqe(struct rte_eth_dev *dev, struct mlx5_quota *quota_obj,
		   quota_wqe_cmd_t wqe_cmd, uint32_t qix, uint32_t queue,
		   struct mlx5_hw_q_job *job, bool push, void *arg)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	struct mlx5_aso_sq *sq = qctx->sq + queue;
	uint32_t head, sq_mask = (1 << sq->log_desc_n) - 1;
	bool sync_queue = is_quota_sync_queue(priv, queue);
	volatile struct mlx5_aso_wqe *restrict wqe;
	int ret = 0;

	if (sync_queue)
		rte_spinlock_lock(&sq->sqsl);
	head = sq->head & sq_mask;
	wqe = &sq->sq_obj.aso_wqes[head];
	wqe_cmd(wqe, qctx, qix, queue, arg);
	wqe->general_cseg.misc = rte_cpu_to_be_32(qctx->devx_obj->id + (qix >> 1));
	wqe->general_cseg.opcode = rte_cpu_to_be_32
		(ASO_OPC_MOD_POLICER << WQE_CSEG_OPC_MOD_OFFSET |
		 sq->pi << WQE_CSEG_WQE_INDEX_OFFSET | MLX5_OPCODE_ACCESS_ASO);
	sq->head++;
	sq->pi += 2; /* Each WQE contains 2 WQEBB */
	if (push) {
		mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
				   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
				   !sh->tx_uar.dbnc);
		sq->db_pi = sq->pi;
	}
	sq->db = wqe;
	job->query.hw = qctx->read_buf[queue] +
			mlx5_quota_wqe_read_offset(qix, head);
	sq->elts[head].quota_obj = sync_queue ?
				   quota_obj : (typeof(quota_obj))job;
	if (sync_queue) {
		rte_spinlock_unlock(&sq->sqsl);
		ret = mlx5_quota_cmd_wait_cmpl(sq, quota_obj);
	}
	return ret;
}

static void
mlx5_quota_destroy_sq(struct mlx5_priv *priv)
{
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t i, nb_queues = priv->nb_queue;

	if (!qctx->sq)
		return;
	for (i = 0; i < nb_queues; i++)
		mlx5_aso_destroy_sq(qctx->sq + i);
	mlx5_free(qctx->sq);
}

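/*
 * Pre-format the constant part of every ASO WQE in the SQ:
 * both meter dsegs are marked valid, start in GREEN color and have
 * token auto-update disabled, so data-path commands only patch tokens.
 */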
static __rte_always_inline void
mlx5_quota_wqe_init_common(struct mlx5_aso_sq *sq,
			   volatile struct mlx5_aso_wqe *restrict wqe)
{
#define ASO_MTR_DW0 RTE_BE32(1 << ASO_DSEG_VALID_OFFSET                  | \
			     MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET)

	memset((void *)(uintptr_t)wqe, 0, sizeof(*wqe));
	wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
						   (sizeof(*wqe) >> 4));
	wqe->aso_cseg.operand_masks = RTE_BE32
	(0u | (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
	 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
	 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
	 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
	wqe->general_cseg.flags = RTE_BE32
	(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	wqe->aso_dseg.mtrs[0].v_bo_sc_bbog_mm = ASO_MTR_DW0;
	/**
	 * ASO Meter tokens auto-update must be disabled in quota action.
	 * Auto-update is disabled when the Meter *IR values are set to
	 * ((0x1u << 16) | (0x1Eu << 24)), **NOT** to 0x00.
	 */
	wqe->aso_dseg.mtrs[0].cbs_cir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
	wqe->aso_dseg.mtrs[0].ebs_eir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
	wqe->aso_dseg.mtrs[1].v_bo_sc_bbog_mm = ASO_MTR_DW0;
	wqe->aso_dseg.mtrs[1].cbs_cir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
	wqe->aso_dseg.mtrs[1].ebs_eir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
#undef ASO_MTR_DW0
}

static void
mlx5_quota_init_sq(struct mlx5_aso_sq *sq)
{
	uint32_t i, size = 1 << sq->log_desc_n;

	for (i = 0; i < size; i++)
		mlx5_quota_wqe_init_common(sq, sq->sq_obj.aso_wqes + i);
}

static int
mlx5_quota_alloc_sq(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t i, nb_queues = priv->nb_queue;

	qctx->sq = mlx5_malloc(MLX5_MEM_ZERO,
			       sizeof(qctx->sq[0]) * nb_queues,
			       0, SOCKET_ID_ANY);
	if (!qctx->sq) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate SQ pool");
		return -ENOMEM;
	}
	for (i = 0; i < nb_queues; i++) {
		int ret = mlx5_aso_sq_create
				(sh->cdev, qctx->sq + i, sh->tx_uar.obj,
				 rte_log2_u32(priv->hw_q[i].size));
		if (ret) {
			DRV_LOG(DEBUG, "QUOTA: failed to allocate SQ[%u]", i);
			return -ENOMEM;
		}
		mlx5_quota_init_sq(qctx->sq + i);
	}
	return 0;
}

static void
mlx5_quota_destroy_read_buf(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;

	if (qctx->mr.lkey) {
		void *addr = qctx->mr.addr;
		sh->cdev->mr_scache.dereg_mr_cb(&qctx->mr);
		mlx5_free(addr);
	}
	if (qctx->read_buf)
		mlx5_free(qctx->read_buf);
}

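/*
 * Allocate a single page-aligned READ buffer shared by all queues and
 * register it as one MR; each queue slice provides 2 dsegs per SQ entry.
 */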
static int
mlx5_quota_alloc_read_buf(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t i, nb_queues = priv->nb_queue;
	uint32_t sq_size_sum;
	size_t page_size = rte_mem_page_size();
	struct mlx5_aso_mtr_dseg *buf;
	size_t rd_buf_size;
	int ret;

	for (i = 0, sq_size_sum = 0; i < nb_queues; i++)
		sq_size_sum += priv->hw_q[i].size;
	/* ACCESS MTR ASO WQE reads 2 MTR objects */
	rd_buf_size = 2 * sq_size_sum * sizeof(buf[0]);
	buf = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, rd_buf_size,
			  page_size, SOCKET_ID_ANY);
	if (!buf) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate MTR ASO READ buffer [1]");
		return -ENOMEM;
	}
	ret = sh->cdev->mr_scache.reg_mr_cb(sh->cdev->pd, buf,
					    rd_buf_size, &qctx->mr);
	if (ret) {
		DRV_LOG(DEBUG, "QUOTA: failed to register MTR ASO READ MR");
		return -errno;
	}
	qctx->read_buf = mlx5_malloc(MLX5_MEM_ZERO,
				     sizeof(qctx->read_buf[0]) * nb_queues,
				     0, SOCKET_ID_ANY);
	if (!qctx->read_buf) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate MTR ASO READ buffer [2]");
		return -ENOMEM;
	}
	for (i = 0; i < nb_queues; i++) {
		qctx->read_buf[i] = buf;
		buf += 2 * priv->hw_q[i].size;
	}
	return 0;
}

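/* Atomically move a quota object from READY to WAIT or fail with EBUSY. */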
static __rte_always_inline int
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
	uint8_t state = MLX5_QUOTA_STATE_READY;
	bool verdict = rte_atomic_compare_exchange_strong_explicit
		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
		 rte_memory_order_relaxed, rte_memory_order_relaxed);

	if (!verdict)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action is busy");
	return 0;
}

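/*
 * Query the current amount of quota tokens.
 * On the sync queue the result is stored in the query structure before
 * the call returns; on async queues the result is delivered later by
 * mlx5_quota_async_completion().
 */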
int
mlx5_quota_query(struct rte_eth_dev *dev, uint32_t queue,
		 const struct rte_flow_action_handle *handle,
		 struct rte_flow_query_quota *query,
		 struct mlx5_hw_q_job *async_job, bool push,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t work_queue = !is_quota_sync_queue(priv, queue) ?
			      queue : quota_sync_queue(priv);
	uint32_t id = MLX5_INDIRECT_ACTION_IDX_GET(handle);
	uint32_t qix = id - 1;
	struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, id);
	struct mlx5_hw_q_job sync_job;
	int ret;

	if (!qobj)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid query handle");
	ret = mlx5_quota_check_ready(qobj, error);
	if (ret)
		return ret;
	ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
				 async_job ? async_job : &sync_job, push, NULL);
	if (ret) {
		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
				 rte_memory_order_relaxed);
		return rte_flow_error_set(error, EAGAIN,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
	}
	if (is_quota_sync_queue(priv, queue))
		query->quota = mlx5_quota_fetch_tokens(sync_job.query.hw);
	return 0;
}

int
mlx5_quota_query_update(struct rte_eth_dev *dev, uint32_t queue,
			struct rte_flow_action_handle *handle,
			const struct rte_flow_action *update,
			struct rte_flow_query_quota *query,
			struct mlx5_hw_q_job *async_job, bool push,
			struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	const struct rte_flow_update_quota *conf = update->conf;
	uint32_t work_queue = !is_quota_sync_queue(priv, queue) ?
			       queue : quota_sync_queue(priv);
	uint32_t id = MLX5_INDIRECT_ACTION_IDX_GET(handle);
	uint32_t qix = id - 1;
	struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, id);
	struct mlx5_hw_q_job sync_job;
	quota_wqe_cmd_t wqe_cmd = query ?
				  mlx5_quota_wqe_query_update :
				  mlx5_quota_wqe_update;
	int ret;

	if (conf->quota > MLX5_MTR_MAX_TOKEN_VALUE)
		return rte_flow_error_set(error, E2BIG,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "update value too big");
	if (!qobj)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid query_update handle");
	if (conf->op == RTE_FLOW_UPDATE_QUOTA_ADD &&
	    qobj->last_update == RTE_FLOW_UPDATE_QUOTA_ADD)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "cannot add twice");
	ret = mlx5_quota_check_ready(qobj, error);
	if (ret)
		return ret;
	ret = mlx5_quota_cmd_wqe(dev, qobj, wqe_cmd, qix, work_queue,
				 async_job ? async_job : &sync_job, push,
				 (void *)(uintptr_t)update->conf);
	if (ret) {
		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
				 rte_memory_order_relaxed);
		return rte_flow_error_set(error, EAGAIN,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
	}
	qobj->last_update = conf->op;
	if (query && is_quota_sync_queue(priv, queue))
		query->quota = mlx5_quota_fetch_tokens(sync_job.query.hw);
	return 0;
}

struct rte_flow_action_handle *
mlx5_quota_alloc(struct rte_eth_dev *dev, uint32_t queue,
		 const struct rte_flow_action_quota *conf,
		 struct mlx5_hw_q_job *job, bool push,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t id;
	struct mlx5_quota *qobj;
	uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_QUOTA <<
			   MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t work_queue = !is_quota_sync_queue(priv, queue) ?
			      queue : quota_sync_queue(priv);
	struct mlx5_hw_q_job sync_job;
	uint8_t state = MLX5_QUOTA_STATE_FREE;
	bool verdict;
	int ret;

	qobj = mlx5_ipool_malloc(qctx->quota_ipool, &id);
	if (!qobj) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "quota: failed to allocate quota object");
		return NULL;
	}
	verdict = rte_atomic_compare_exchange_strong_explicit
		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
		 rte_memory_order_relaxed, rte_memory_order_relaxed);
	if (!verdict) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "quota: new quota object has invalid state");
		return NULL;
	}
	switch (conf->mode) {
	case RTE_FLOW_QUOTA_MODE_L2:
		qobj->mode = MLX5_METER_MODE_L2_LEN;
		break;
	case RTE_FLOW_QUOTA_MODE_PACKET:
		qobj->mode = MLX5_METER_MODE_PKT;
		break;
	default:
		qobj->mode = MLX5_METER_MODE_IP_LEN;
	}
	ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_set_init_wqe, id - 1,
				 work_queue, job ? job : &sync_job, push,
				 (void *)(uintptr_t)conf);
	if (ret) {
		mlx5_ipool_free(qctx->quota_ipool, id);
		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
				 rte_memory_order_relaxed);
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "quota: WR failure");
		return NULL;
	}
	return (struct rte_flow_action_handle *)(handle | id);
}

int
mlx5_flow_quota_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	int ret;

	if (qctx->dr_action) {
		ret = mlx5dr_action_destroy(qctx->dr_action);
		if (ret)
			DRV_LOG(ERR, "QUOTA: failed to destroy DR action");
	}
	if (!priv->shared_host) {
		if (qctx->quota_ipool)
			mlx5_ipool_destroy(qctx->quota_ipool);
		mlx5_quota_destroy_sq(priv);
		mlx5_quota_destroy_read_buf(priv);
		if (qctx->devx_obj) {
			ret = mlx5_devx_cmd_destroy(qctx->devx_obj);
			if (ret)
				DRV_LOG(ERR,
					"QUOTA: failed to destroy MTR ASO object");
		}
	}
	memset(qctx, 0, sizeof(*qctx));
	return 0;
}

#define MLX5_QUOTA_IPOOL_TRUNK_SIZE (1u << 12)
#define MLX5_QUOTA_IPOOL_CACHE_SIZE (1u << 13)

static int
mlx5_quota_init_guest(struct mlx5_priv *priv)
{
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	struct rte_eth_dev *host_dev = priv->shared_host;
	struct mlx5_priv *host_priv = host_dev->data->dev_private;

	/**
	 * A shared quota object can be used in flow rules only.
	 * The DR flow action needs access to the host ASO objects.
	 */
	qctx->devx_obj = host_priv->quota_ctx.devx_obj;
	return 0;
}

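/*
 * A host port owns the MTR ASO DEVX object, the command SQs, the READ
 * buffer and the quota index pool; guest ports only reference the host
 * DEVX object.
 */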
static int
mlx5_quota_init_host(struct mlx5_priv *priv, uint32_t nb_quotas)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	struct mlx5_indexed_pool_config quota_ipool_cfg = {
		.size = sizeof(struct mlx5_quota),
		.trunk_size = RTE_MIN(nb_quotas, MLX5_QUOTA_IPOOL_TRUNK_SIZE),
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.max_idx = nb_quotas,
		.free = mlx5_free,
		.type = "mlx5_flow_quota_index_pool"
	};
	int ret;

	if (!nb_quotas) {
		DRV_LOG(DEBUG, "QUOTA: cannot create quota with 0 objects");
		return -EINVAL;
	}
	if (!priv->mtr_en || !sh->meter_aso_en) {
		DRV_LOG(DEBUG, "QUOTA: no MTR support");
		return -ENOTSUP;
	}
	qctx->devx_obj = mlx5_devx_cmd_create_flow_meter_aso_obj
		(sh->cdev->ctx, sh->cdev->pdn, rte_log2_u32(nb_quotas >> 1));
	if (!qctx->devx_obj) {
		DRV_LOG(DEBUG, "QUOTA: cannot allocate MTR ASO objects");
		return -ENOMEM;
	}
	ret = mlx5_quota_alloc_read_buf(priv);
	if (ret)
		return ret;
	ret = mlx5_quota_alloc_sq(priv);
	if (ret)
		return ret;
	if (nb_quotas < MLX5_QUOTA_IPOOL_TRUNK_SIZE)
		quota_ipool_cfg.per_core_cache = 0;
	else if (nb_quotas < MLX5_HW_IPOOL_SIZE_THRESHOLD)
		quota_ipool_cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
	else
		quota_ipool_cfg.per_core_cache = MLX5_QUOTA_IPOOL_CACHE_SIZE;
	qctx->quota_ipool = mlx5_ipool_create(&quota_ipool_cfg);
	if (!qctx->quota_ipool) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate quota pool");
		return -ENOMEM;
	}
	return 0;
}

int
mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
	int reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
	int ret;

	if (reg_id < 0) {
		DRV_LOG(DEBUG, "QUOTA: MTR register not available");
		return -ENOTSUP;
	}
	if (!priv->shared_host)
		ret = mlx5_quota_init_host(priv, nb_quotas);
	else
		ret = mlx5_quota_init_guest(priv);
	if (ret)
		goto err;
	if (priv->sh->config.dv_esw_en && priv->master)
		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
	qctx->dr_action = mlx5dr_action_create_aso_meter
		(priv->dr_ctx, (struct mlx5dr_devx_obj *)qctx->devx_obj,
		 reg_id - REG_C_0, flags);
	if (!qctx->dr_action) {
		DRV_LOG(DEBUG, "QUOTA: failed to create DR action");
		ret = -ENOMEM;
		goto err;
	}
	return 0;
err:
	mlx5_flow_quota_destroy(dev);
	return ret;
}

#endif /* defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) */