xref: /dpdk/drivers/net/mlx5/mlx5_flow_quota.c (revision e12a0166c80f65e35408f4715b2f3a60763c3741)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Nvidia Inc. All rights reserved.
 */

#include <stddef.h>
#include <rte_eal_paging.h>

#include "mlx5_utils.h"
#include "mlx5_flow.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
				struct mlx5_quota_ctx *, uint32_t, uint32_t,
				void *);

#define MLX5_ASO_MTR1_INIT_MASK 0xffffffffULL
#define MLX5_ASO_MTR0_INIT_MASK ((MLX5_ASO_MTR1_INIT_MASK) << 32)

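/*
 * Each flow meter ASO object hosts two meter instances (MTR0 and MTR1).
 * A quota index (qix) maps to the ASO object at devx base id + qix / 2;
 * bit 0 of qix selects the meter instance inside that object, and the
 * INIT/ADD/SET data masks restrict WQE writes to the bytes of the
 * selected instance.
 */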
static __rte_always_inline bool
is_aso_mtr1_obj(uint32_t qix)
{
	return (qix & 1) != 0;
}

static __rte_always_inline bool
is_quota_sync_queue(const struct mlx5_priv *priv, uint32_t queue)
{
	return queue >= priv->nb_queue - 1;
}

static __rte_always_inline uint32_t
quota_sync_queue(const struct mlx5_priv *priv)
{
	return priv->nb_queue - 1;
}

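/*
 * An ASO READ returns both meter data segments of the accessed object into
 * two consecutive slots of the per-queue read buffer (2 slots per SQ entry).
 * The token report of a given quota therefore sits at 2 * sq_index + (qix & 1).
 */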
static __rte_always_inline uint32_t
mlx5_quota_wqe_read_offset(uint32_t qix, uint32_t sq_index)
{
	return 2 * sq_index + (qix & 1);
}

static int32_t
mlx5_quota_fetch_tokens(const struct mlx5_aso_mtr_dseg *rd_buf)
{
	int c_tok = (int)rte_be_to_cpu_32(rd_buf->c_tokens);
	int e_tok = (int)rte_be_to_cpu_32(rd_buf->e_tokens);
	int result;

	DRV_LOG(DEBUG, "c_tokens %d e_tokens %d\n",
		rte_be_to_cpu_32(rd_buf->c_tokens),
		rte_be_to_cpu_32(rd_buf->e_tokens));
	/* Query after SET ignores negative E tokens */
	if (c_tok >= 0 && e_tok < 0)
		result = c_tok;
	/**
	 * If the number of tokens in a Meter bucket is zero or above,
	 * Meter hardware will use that bucket and can drive its token count
	 * to a negative value.
	 * Quota can discard negative C tokens in the query report.
	 * That is a known hardware limitation.
	 * Use case example (the '*' row reports E tokens only, because
	 * C + E would be negative):
	 *
	 *      C     E   Result
	 *     250   250   500
	 *      50   250   300
	 *    -150   250   100
	 *    -150    50    50 *
	 *    -150  -150  -300
	 *
	 */
	else if (c_tok < 0 && e_tok >= 0 && (c_tok + e_tok) < 0)
		result = e_tok;
	else
		result = c_tok + e_tok;

	return result;
}

static void
mlx5_quota_query_update_async_cmpl(struct mlx5_hw_q_job *job)
{
	struct rte_flow_query_quota *query = job->query.user;

	query->quota = mlx5_quota_fetch_tokens(job->query.hw);
}

void
mlx5_quota_async_completion(struct rte_eth_dev *dev, uint32_t queue,
			    struct mlx5_hw_q_job *job)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t qix = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
	struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, qix);

	RTE_SET_USED(queue);
	qobj->state = MLX5_QUOTA_STATE_READY;
	switch (job->type) {
	case MLX5_HW_Q_JOB_TYPE_CREATE:
		break;
	case MLX5_HW_Q_JOB_TYPE_QUERY:
	case MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY:
		mlx5_quota_query_update_async_cmpl(job);
		break;
	default:
		break;
	}
}

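/*
 * WQE builders: a query arms the ASO READ path (read buffer address, lkey,
 * READ flag) and clears the data mask so nothing is modified; an update
 * programs the token data segment and mask and clears the READ flag;
 * query-and-update does both.
 */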
static __rte_always_inline void
mlx5_quota_wqe_set_aso_read(volatile struct mlx5_aso_wqe *restrict wqe,
			    struct mlx5_quota_ctx *qctx, uint32_t queue)
{
	struct mlx5_aso_sq *sq = qctx->sq + queue;
	uint32_t sq_mask = (1 << sq->log_desc_n) - 1;
	uint32_t sq_head = sq->head & sq_mask;
	uint64_t rd_addr = (uint64_t)(qctx->read_buf[queue] + 2 * sq_head);

	wqe->aso_cseg.lkey = rte_cpu_to_be_32(qctx->mr.lkey);
	wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(rd_addr >> 32));
	wqe->aso_cseg.va_l_r = rte_cpu_to_be_32(((uint32_t)rd_addr) |
						MLX5_ASO_CSEG_READ_ENABLE);
}

#define MLX5_ASO_MTR1_ADD_MASK 0x00000F00ULL
#define MLX5_ASO_MTR1_SET_MASK 0x000F0F00ULL
#define MLX5_ASO_MTR0_ADD_MASK ((MLX5_ASO_MTR1_ADD_MASK) << 32)
#define MLX5_ASO_MTR0_SET_MASK ((MLX5_ASO_MTR1_SET_MASK) << 32)

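/*
 * RTE_FLOW_UPDATE_QUOTA_SET rewrites both the C and E token fields (E is
 * forced to -1 so that only C tokens are spent), while
 * RTE_FLOW_UPDATE_QUOTA_ADD touches the E tokens field only.
 * The masks above restrict the WQE write accordingly.
 */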
static __rte_always_inline void
mlx5_quota_wqe_set_mtr_tokens(volatile struct mlx5_aso_wqe *restrict wqe,
			      uint32_t qix, void *arg)
{
	volatile struct mlx5_aso_mtr_dseg *mtr_dseg;
	const struct rte_flow_update_quota *conf = arg;
	bool set_op = (conf->op == RTE_FLOW_UPDATE_QUOTA_SET);

	if (is_aso_mtr1_obj(qix)) {
		wqe->aso_cseg.data_mask = set_op ?
					  RTE_BE64(MLX5_ASO_MTR1_SET_MASK) :
					  RTE_BE64(MLX5_ASO_MTR1_ADD_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs + 1;
	} else {
		wqe->aso_cseg.data_mask = set_op ?
					  RTE_BE64(MLX5_ASO_MTR0_SET_MASK) :
					  RTE_BE64(MLX5_ASO_MTR0_ADD_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs;
	}
	if (set_op) {
		/* prevent using E tokens when C tokens are exhausted */
		mtr_dseg->e_tokens = -1;
		mtr_dseg->c_tokens = rte_cpu_to_be_32(conf->quota);
	} else {
		mtr_dseg->e_tokens = rte_cpu_to_be_32(conf->quota);
	}
}

static __rte_always_inline void
mlx5_quota_wqe_query(volatile struct mlx5_aso_wqe *restrict wqe,
		     struct mlx5_quota_ctx *qctx, __rte_unused uint32_t qix,
		     uint32_t queue, __rte_unused void *arg)
{
	mlx5_quota_wqe_set_aso_read(wqe, qctx, queue);
	wqe->aso_cseg.data_mask = 0ull; /* clear MTR ASO data modification */
}

static __rte_always_inline void
mlx5_quota_wqe_update(volatile struct mlx5_aso_wqe *restrict wqe,
		      __rte_unused struct mlx5_quota_ctx *qctx, uint32_t qix,
		      __rte_unused uint32_t queue, void *arg)
{
	mlx5_quota_wqe_set_mtr_tokens(wqe, qix, arg);
	wqe->aso_cseg.va_l_r = 0; /* clear READ flag */
}

static __rte_always_inline void
mlx5_quota_wqe_query_update(volatile struct mlx5_aso_wqe *restrict wqe,
			    struct mlx5_quota_ctx *qctx, uint32_t qix,
			    uint32_t queue, void *arg)
{
	mlx5_quota_wqe_set_aso_read(wqe, qctx, queue);
	mlx5_quota_wqe_set_mtr_tokens(wqe, qix, arg);
}

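/*
 * Initial WQE for a newly allocated quota: the whole meter half is written,
 * C tokens are loaded with the configured quota, E tokens are disabled (-1)
 * and the metering mode (packet / L2 / IP length) is taken from the quota
 * object.
 */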
static __rte_always_inline void
mlx5_quota_set_init_wqe(volatile struct mlx5_aso_wqe *restrict wqe,
			struct mlx5_quota_ctx *qctx, uint32_t qix,
			__rte_unused uint32_t queue, void *arg)
{
	volatile struct mlx5_aso_mtr_dseg *mtr_dseg;
	const struct rte_flow_action_quota *conf = arg;
	const struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, qix + 1);

	if (is_aso_mtr1_obj(qix)) {
		wqe->aso_cseg.data_mask =
			rte_cpu_to_be_64(MLX5_ASO_MTR1_INIT_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs + 1;
	} else {
		wqe->aso_cseg.data_mask =
			rte_cpu_to_be_64(MLX5_ASO_MTR0_INIT_MASK);
		mtr_dseg = wqe->aso_dseg.mtrs;
	}
	mtr_dseg->e_tokens = -1;
	mtr_dseg->c_tokens = rte_cpu_to_be_32(conf->quota);
	mtr_dseg->v_bo_sc_bbog_mm |= rte_cpu_to_be_32
		(qobj->mode << ASO_DSEG_MTR_MODE);
}

static __rte_always_inline void
mlx5_quota_cmd_completed_status(struct mlx5_aso_sq *sq, uint16_t n)
{
	uint16_t i, mask = (1 << sq->log_desc_n) - 1;

	for (i = 0; i < n; i++) {
		uint8_t state = MLX5_QUOTA_STATE_WAIT;
		struct mlx5_quota *quota_obj =
			sq->elts[(sq->tail + i) & mask].quota_obj;

		rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
					    MLX5_QUOTA_STATE_READY,
					    rte_memory_order_relaxed, rte_memory_order_relaxed);
	}
}

static void
mlx5_quota_cmd_completion_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	uint16_t max;
	uint16_t n = 0;
	int ret;

	MLX5_ASSERT(rte_spinlock_is_locked(&sq->sqsl));
	max = (uint16_t)(sq->head - sq->tail);
	if (unlikely(!max))
		return;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (ret != MLX5_CQE_STATUS_SW_OWN) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			n++;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(n)) {
		mlx5_quota_cmd_completed_status(sq, n);
		sq->tail += n;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
}

static int
mlx5_quota_cmd_wait_cmpl(struct mlx5_aso_sq *sq, struct mlx5_quota *quota_obj)
{
	uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;

	do {
		rte_spinlock_lock(&sq->sqsl);
		mlx5_quota_cmd_completion_handle(sq);
		rte_spinlock_unlock(&sq->sqsl);
		if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
		    MLX5_QUOTA_STATE_READY)
			return 0;
	} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	DRV_LOG(ERR, "QUOTA: failed to poll command CQ");
	return -1;
}

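/*
 * Common WQE submission path. The last queue is the service (sync) queue:
 * it is protected by the SQ spinlock and the caller busy-polls the CQ until
 * the command completes. On async queues the job pointer is stored in the SQ
 * element and the result is delivered via mlx5_quota_async_completion().
 * Each ASO access WQE takes 2 WQEBBs, hence the producer index advances by 2.
 */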
static int
mlx5_quota_cmd_wqe(struct rte_eth_dev *dev, struct mlx5_quota *quota_obj,
		   quota_wqe_cmd_t wqe_cmd, uint32_t qix, uint32_t queue,
		   struct mlx5_hw_q_job *job, bool push, void *arg)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	struct mlx5_aso_sq *sq = qctx->sq + queue;
	uint32_t head, sq_mask = (1 << sq->log_desc_n) - 1;
	bool sync_queue = is_quota_sync_queue(priv, queue);
	volatile struct mlx5_aso_wqe *restrict wqe;
	int ret = 0;

	if (sync_queue)
		rte_spinlock_lock(&sq->sqsl);
	head = sq->head & sq_mask;
	wqe = &sq->sq_obj.aso_wqes[head];
	wqe_cmd(wqe, qctx, qix, queue, arg);
	wqe->general_cseg.misc = rte_cpu_to_be_32(qctx->devx_obj->id + (qix >> 1));
	wqe->general_cseg.opcode = rte_cpu_to_be_32
		(ASO_OPC_MOD_POLICER << WQE_CSEG_OPC_MOD_OFFSET |
		 sq->pi << WQE_CSEG_WQE_INDEX_OFFSET | MLX5_OPCODE_ACCESS_ASO);
	sq->head++;
	sq->pi += 2; /* Each WQE contains 2 WQEBB */
	if (push) {
		mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
				   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
				   !sh->tx_uar.dbnc);
		sq->db_pi = sq->pi;
	}
	sq->db = wqe;
	job->query.hw = qctx->read_buf[queue] +
			mlx5_quota_wqe_read_offset(qix, head);
	sq->elts[head].quota_obj = sync_queue ?
				   quota_obj : (typeof(quota_obj))job;
	if (sync_queue) {
		rte_spinlock_unlock(&sq->sqsl);
		ret = mlx5_quota_cmd_wait_cmpl(sq, quota_obj);
	}
	return ret;
}

static void
mlx5_quota_destroy_sq(struct mlx5_priv *priv)
{
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t i, nb_queues = priv->nb_queue;

	if (!qctx->sq)
		return;
	for (i = 0; i < nb_queues; i++)
		mlx5_aso_destroy_sq(qctx->sq + i);
	mlx5_free(qctx->sq);
}

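/*
 * Pre-build the constant part of every ASO meter WQE in the SQ: valid meter
 * dsegs with green start color, condition operands that always evaluate true,
 * bytewise 64-byte data-mask mode and CBS/EBS encodings that keep token
 * auto-refill disabled. Per-command code later patches only the data mask,
 * token values and the READ address.
 */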
static __rte_always_inline void
mlx5_quota_wqe_init_common(struct mlx5_aso_sq *sq,
			   volatile struct mlx5_aso_wqe *restrict wqe)
{
#define ASO_MTR_DW0 RTE_BE32(1 << ASO_DSEG_VALID_OFFSET                  | \
			     MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET)

	memset((void *)(uintptr_t)wqe, 0, sizeof(*wqe));
	wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
						   (sizeof(*wqe) >> 4));
	wqe->aso_cseg.operand_masks = RTE_BE32
	(0u | (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
	 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
	 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
	 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
	wqe->general_cseg.flags = RTE_BE32
	(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	wqe->aso_dseg.mtrs[0].v_bo_sc_bbog_mm = ASO_MTR_DW0;
	/**
	 * ASO Meter tokens auto-update must be disabled in quota action.
	 * Tokens auto-update is disabled when the Meter *IR values are set to
	 * ((0x1u << 16) | (0x1Eu << 24)), **NOT** to 0x00.
	 */
	wqe->aso_dseg.mtrs[0].cbs_cir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
	wqe->aso_dseg.mtrs[0].ebs_eir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
	wqe->aso_dseg.mtrs[1].v_bo_sc_bbog_mm = ASO_MTR_DW0;
	wqe->aso_dseg.mtrs[1].cbs_cir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
	wqe->aso_dseg.mtrs[1].ebs_eir = RTE_BE32((0x1u << 16) | (0x1Eu << 24));
#undef ASO_MTR_DW0
}

static void
mlx5_quota_init_sq(struct mlx5_aso_sq *sq)
{
	uint32_t i, size = 1 << sq->log_desc_n;

	for (i = 0; i < size; i++)
		mlx5_quota_wqe_init_common(sq, sq->sq_obj.aso_wqes + i);
}

static int
mlx5_quota_alloc_sq(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t i, nb_queues = priv->nb_queue;

	qctx->sq = mlx5_malloc(MLX5_MEM_ZERO,
			       sizeof(qctx->sq[0]) * nb_queues,
			       0, SOCKET_ID_ANY);
	if (!qctx->sq) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate SQ pool");
		return -ENOMEM;
	}
	for (i = 0; i < nb_queues; i++) {
		int ret = mlx5_aso_sq_create
				(sh->cdev, qctx->sq + i, sh->tx_uar.obj,
				 rte_log2_u32(priv->hw_q[i].size));
		if (ret) {
			DRV_LOG(DEBUG, "QUOTA: failed to allocate SQ[%u]", i);
			return -ENOMEM;
		}
		mlx5_quota_init_sq(qctx->sq + i);
	}
	return 0;
}

static void
mlx5_quota_destroy_read_buf(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;

	if (qctx->mr.lkey) {
		void *addr = qctx->mr.addr;
		sh->cdev->mr_scache.dereg_mr_cb(&qctx->mr);
		mlx5_free(addr);
	}
	if (qctx->read_buf)
		mlx5_free(qctx->read_buf);
}

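/*
 * The READ buffer is a single page-aligned allocation registered as one MR
 * and sliced per queue: each queue gets two meter dsegs per SQ entry,
 * matching the layout used by mlx5_quota_wqe_read_offset().
 */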
static int
mlx5_quota_alloc_read_buf(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t i, nb_queues = priv->nb_queue;
	uint32_t sq_size_sum;
	size_t page_size = rte_mem_page_size();
	struct mlx5_aso_mtr_dseg *buf;
	size_t rd_buf_size;
	int ret;

	for (i = 0, sq_size_sum = 0; i < nb_queues; i++)
		sq_size_sum += priv->hw_q[i].size;
	/* ACCESS MTR ASO WQE reads 2 MTR objects */
	rd_buf_size = 2 * sq_size_sum * sizeof(buf[0]);
	buf = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, rd_buf_size,
			  page_size, SOCKET_ID_ANY);
	if (!buf) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate MTR ASO READ buffer [1]");
		return -ENOMEM;
	}
	ret = sh->cdev->mr_scache.reg_mr_cb(sh->cdev->pd, buf,
					    rd_buf_size, &qctx->mr);
	if (ret) {
		DRV_LOG(DEBUG, "QUOTA: failed to register MTR ASO READ MR");
		return -errno;
	}
	qctx->read_buf = mlx5_malloc(MLX5_MEM_ZERO,
				     sizeof(qctx->read_buf[0]) * nb_queues,
				     0, SOCKET_ID_ANY);
	if (!qctx->read_buf) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate MTR ASO READ buffer [2]");
		return -ENOMEM;
	}
	for (i = 0; i < nb_queues; i++) {
		qctx->read_buf[i] = buf;
		buf += 2 * priv->hw_q[i].size;
	}
	return 0;
}

static __rte_always_inline int
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
	uint8_t state = MLX5_QUOTA_STATE_READY;
	bool verdict = rte_atomic_compare_exchange_strong_explicit
		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
		 rte_memory_order_relaxed, rte_memory_order_relaxed);

	if (!verdict)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action is busy");
	return 0;
}

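/*
 * Query / update entry points. The indirect action handle carries a 1-based
 * ipool index; qix = id - 1 addresses the meter half. Calls on the sync
 * (service) queue read the token count directly from the local job's read
 * buffer; async calls get it later in mlx5_quota_async_completion().
 */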
int
mlx5_quota_query(struct rte_eth_dev *dev, uint32_t queue,
		 const struct rte_flow_action_handle *handle,
		 struct rte_flow_query_quota *query,
		 struct mlx5_hw_q_job *async_job, bool push,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t work_queue = !is_quota_sync_queue(priv, queue) ?
			      queue : quota_sync_queue(priv);
	uint32_t id = MLX5_INDIRECT_ACTION_IDX_GET(handle);
	uint32_t qix = id - 1;
	struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, id);
	struct mlx5_hw_q_job sync_job;
	int ret;

	if (!qobj)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid query handle");
	ret = mlx5_quota_check_ready(qobj, error);
	if (ret)
		return ret;
	ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
				 async_job ? async_job : &sync_job, push, NULL);
	if (ret) {
		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
				 rte_memory_order_relaxed);
		return rte_flow_error_set(error, EAGAIN,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
	}
	if (is_quota_sync_queue(priv, queue))
		query->quota = mlx5_quota_fetch_tokens(sync_job.query.hw);
	return 0;
}

int
mlx5_quota_query_update(struct rte_eth_dev *dev, uint32_t queue,
			struct rte_flow_action_handle *handle,
			const struct rte_flow_action *update,
			struct rte_flow_query_quota *query,
			struct mlx5_hw_q_job *async_job, bool push,
			struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	const struct rte_flow_update_quota *conf = update->conf;
	uint32_t work_queue = !is_quota_sync_queue(priv, queue) ?
			       queue : quota_sync_queue(priv);
	uint32_t id = MLX5_INDIRECT_ACTION_IDX_GET(handle);
	uint32_t qix = id - 1;
	struct mlx5_quota *qobj = mlx5_ipool_get(qctx->quota_ipool, id);
	struct mlx5_hw_q_job sync_job;
	quota_wqe_cmd_t wqe_cmd = query ?
				  mlx5_quota_wqe_query_update :
				  mlx5_quota_wqe_update;
	int ret;

	if (conf->quota > MLX5_MTR_MAX_TOKEN_VALUE)
		return rte_flow_error_set(error, E2BIG,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "update value too big");
	if (!qobj)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid query_update handle");
	if (conf->op == RTE_FLOW_UPDATE_QUOTA_ADD &&
	    qobj->last_update == RTE_FLOW_UPDATE_QUOTA_ADD)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "cannot add twice");
	ret = mlx5_quota_check_ready(qobj, error);
	if (ret)
		return ret;
	ret = mlx5_quota_cmd_wqe(dev, qobj, wqe_cmd, qix, work_queue,
				 async_job ? async_job : &sync_job, push,
				 (void *)(uintptr_t)update->conf);
	if (ret) {
		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
				 rte_memory_order_relaxed);
		return rte_flow_error_set(error, EAGAIN,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
	}
	qobj->last_update = conf->op;
	if (query && is_quota_sync_queue(priv, queue))
		query->quota = mlx5_quota_fetch_tokens(sync_job.query.hw);
	return 0;
}

struct rte_flow_action_handle *
mlx5_quota_alloc(struct rte_eth_dev *dev, uint32_t queue,
		 const struct rte_flow_action_quota *conf,
		 struct mlx5_hw_q_job *job, bool push,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t id;
	struct mlx5_quota *qobj;
	uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_QUOTA <<
			   MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t work_queue = !is_quota_sync_queue(priv, queue) ?
			      queue : quota_sync_queue(priv);
	struct mlx5_hw_q_job sync_job;
	uint8_t state = MLX5_QUOTA_STATE_FREE;
	bool verdict;
	int ret;

	qobj = mlx5_ipool_malloc(qctx->quota_ipool, &id);
	if (!qobj) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "quota: failed to allocate quota object");
		return NULL;
	}
	verdict = rte_atomic_compare_exchange_strong_explicit
		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
		 rte_memory_order_relaxed, rte_memory_order_relaxed);
	if (!verdict) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "quota: new quota object has invalid state");
		return NULL;
	}
	switch (conf->mode) {
	case RTE_FLOW_QUOTA_MODE_L2:
		qobj->mode = MLX5_METER_MODE_L2_LEN;
		break;
	case RTE_FLOW_QUOTA_MODE_PACKET:
		qobj->mode = MLX5_METER_MODE_PKT;
		break;
	default:
		qobj->mode = MLX5_METER_MODE_IP_LEN;
	}
	ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_set_init_wqe, id - 1,
				 work_queue, job ? job : &sync_job, push,
				 (void *)(uintptr_t)conf);
	if (ret) {
		mlx5_ipool_free(qctx->quota_ipool, id);
		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
				 rte_memory_order_relaxed);
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "quota: WR failure");
		return NULL;
	}
	return (struct rte_flow_action_handle *)(handle | id);
}

int
mlx5_flow_quota_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	int ret;

	if (qctx->dr_action) {
		ret = mlx5dr_action_destroy(qctx->dr_action);
		if (ret)
			DRV_LOG(ERR, "QUOTA: failed to destroy DR action");
	}
	if (!priv->shared_host) {
		if (qctx->quota_ipool)
			mlx5_ipool_destroy(qctx->quota_ipool);
		mlx5_quota_destroy_sq(priv);
		mlx5_quota_destroy_read_buf(priv);
		if (qctx->devx_obj) {
			ret = mlx5_devx_cmd_destroy(qctx->devx_obj);
			if (ret)
				DRV_LOG(ERR,
					"QUOTA: failed to destroy MTR ASO object");
		}
	}
	memset(qctx, 0, sizeof(*qctx));
	return 0;
}

#define MLX5_QUOTA_IPOOL_TRUNK_SIZE (1u << 12)
#define MLX5_QUOTA_IPOOL_CACHE_SIZE (1u << 13)

static int
mlx5_quota_init_guest(struct mlx5_priv *priv)
{
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	struct rte_eth_dev *host_dev = priv->shared_host;
	struct mlx5_priv *host_priv = host_dev->data->dev_private;

	/**
	 * Shared quota object can be used in flow rules only.
	 * DR5 flow action needs access to ASO objects.
	 */
	qctx->devx_obj = host_priv->quota_ctx.devx_obj;
	return 0;
}

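/*
 * Host port initialization: each flow meter ASO object provides two quota
 * slots, so the MTR ASO bulk is sized to nb_quotas / 2 objects. Guest
 * (shared host) ports reuse the host's ASO objects and only create their
 * own DR action.
 */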
static int
mlx5_quota_init_host(struct mlx5_priv *priv, uint32_t nb_quotas)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	struct mlx5_indexed_pool_config quota_ipool_cfg = {
		.size = sizeof(struct mlx5_quota),
		.trunk_size = RTE_MIN(nb_quotas, MLX5_QUOTA_IPOOL_TRUNK_SIZE),
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.max_idx = nb_quotas,
		.free = mlx5_free,
		.type = "mlx5_flow_quota_index_pool"
	};
	int ret;

	if (!nb_quotas) {
		DRV_LOG(DEBUG, "QUOTA: cannot create quota with 0 objects");
		return -EINVAL;
	}
	if (!priv->mtr_en || !sh->meter_aso_en) {
		DRV_LOG(DEBUG, "QUOTA: no MTR support");
		return -ENOTSUP;
	}
	qctx->devx_obj = mlx5_devx_cmd_create_flow_meter_aso_obj
		(sh->cdev->ctx, sh->cdev->pdn, rte_log2_u32(nb_quotas >> 1));
	if (!qctx->devx_obj) {
		DRV_LOG(DEBUG, "QUOTA: cannot allocate MTR ASO objects");
		return -ENOMEM;
	}
	ret = mlx5_quota_alloc_read_buf(priv);
	if (ret)
		return ret;
	ret = mlx5_quota_alloc_sq(priv);
	if (ret)
		return ret;
	if (nb_quotas < MLX5_QUOTA_IPOOL_TRUNK_SIZE)
		quota_ipool_cfg.per_core_cache = 0;
	else if (nb_quotas < MLX5_HW_IPOOL_SIZE_THRESHOLD)
		quota_ipool_cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
	else
		quota_ipool_cfg.per_core_cache = MLX5_QUOTA_IPOOL_CACHE_SIZE;
	qctx->quota_ipool = mlx5_ipool_create(&quota_ipool_cfg);
	if (!qctx->quota_ipool) {
		DRV_LOG(DEBUG, "QUOTA: failed to allocate quota pool");
		return -ENOMEM;
	}
	return 0;
}

int
mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_quota_ctx *qctx = &priv->quota_ctx;
	uint32_t flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
	int reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
	int ret;

	if (reg_id < 0) {
		DRV_LOG(DEBUG, "QUOTA: MTR color register not available");
		return -ENOTSUP;
	}
	if (!priv->shared_host)
		ret = mlx5_quota_init_host(priv, nb_quotas);
	else
		ret = mlx5_quota_init_guest(priv);
	if (ret)
		goto err;
	if (priv->sh->config.dv_esw_en && priv->master)
		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
	qctx->dr_action = mlx5dr_action_create_aso_meter
		(priv->dr_ctx, (struct mlx5dr_devx_obj *)qctx->devx_obj,
		 reg_id - REG_C_0, flags);
	if (!qctx->dr_action) {
		DRV_LOG(DEBUG, "QUOTA: failed to create DR action");
		ret = -ENOMEM;
		goto err;
	}
	return 0;
err:
	mlx5_flow_quota_destroy(dev);
	return ret;
}

#endif /* defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) */