xref: /dpdk/drivers/net/mlx5/mlx5_txpp.c (revision aef1e20ebeb2777d0af1f72b8afa9dc00e5b5fe9)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"

/* Destroy Event Queue Notification Channel. */
static void
mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->txpp.echan) {
		mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
		sh->txpp.echan = NULL;
	}
	sh->txpp.eqn = 0;
}

/* Create Event Queue Notification Channel. */
static int
mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t lcore;

	MLX5_ASSERT(!sh->txpp.echan);
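	/*
	 * Query the EQ number for the CPU of the calling lcore;
	 * passing -1 to rte_lcore_to_cpu_id() selects the current lcore.
	 */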
	lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		sh->txpp.eqn = 0;
		return -rte_errno;
	}
	sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!sh->txpp.echan) {
		sh->txpp.eqn = 0;
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		return -rte_errno;
	}
	return 0;
}

static void
mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->txpp.pp) {
		mlx5_glue->dv_free_pp(sh->txpp.pp);
		sh->txpp.pp = NULL;
		sh->txpp.pp_id = 0;
	}
}

/* Allocate Packet Pacing index from kernel via mlx5dv call. */
static int
mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
	uint64_t rate;

	MLX5_ASSERT(!sh->txpp.pp);
	memset(&pp, 0, sizeof(pp));
	rate = NS_PER_S / sh->txpp.tick;
	if (rate * sh->txpp.tick != NS_PER_S)
		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
	if (sh->txpp.test) {
		uint32_t len;

		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
			      (size_t)RTE_ETHER_MIN_LEN);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 burst_upper_bound, len);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 typical_packet_size, len);
		/* Convert packets per second into kilobits per second. */
		rate = (rate * len) / (1000ul / CHAR_BIT);
		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
	}
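	/*
	 * In regular mode the limit below is expressed in WQEs per second
	 * (WQE rate), in test mode it was converted above to kilobits per
	 * second (data rate).
	 */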
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
	sh->txpp.pp = mlx5_glue->dv_alloc_pp
				(sh->ctx, sizeof(pp), &pp,
				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (sh->txpp.pp == NULL) {
		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
		rte_errno = errno;
		return -errno;
	}
	if (!sh->txpp.pp->index) {
		DRV_LOG(ERR, "Zero packet pacing index allocated.");
		mlx5_txpp_free_pp_index(sh);
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	sh->txpp.pp_id = sh->txpp.pp->index;
	return 0;
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Allocating pacing index is not supported.");
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

static void
mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
{
	if (wq->sq)
		claim_zero(mlx5_devx_cmd_destroy(wq->sq));
	if (wq->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
	if (wq->sq_buf)
		rte_free((void *)(uintptr_t)wq->sq_buf);
	if (wq->cq)
		claim_zero(mlx5_devx_cmd_destroy(wq->cq));
	if (wq->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
	if (wq->cq_buf)
		rte_free((void *)(uintptr_t)wq->cq_buf);
	memset(wq, 0, sizeof(*wq));
}

static void
mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
	uint32_t i;

	for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		++cqe;
	}
}

static void
mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
	uint32_t i;

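	/*
	 * Each Rearm Queue entry is a pair of WQEs: SEND_EN enables the
	 * next MLX5_TXPP_REARM Clock Queue WQEs, WAIT then blocks until
	 * the Clock Queue CQ reaches the middle of the enabled window.
	 */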
	for (i = 0; i < wq->sq_size; i += 2) {
		struct mlx5_wqe_cseg *cs;
		struct mlx5_wqe_qseg *qs;
		uint32_t index;

		/* Build SEND_EN request with slave WQE index. */
		cs = &wqe[i + 0].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
		/* Build WAIT request with slave CQE index. */
		cs = &wqe[i + 1].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
	}
}

/* Creates the Rearm Queue to fire requests to the Clock Queue in real time. */
static int
mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	size_t page_size = sysconf(_SC_PAGESIZE);
	uint32_t umem_size, umem_dbrec;
	int ret;

	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->cq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
		return -ENOMEM;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->cq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
		goto error;
	}
	/* Create completion queue object for Rearm Queue. */
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.uar_page_id = sh->tx_uar->page_id;
	cq_attr.eqn = sh->txpp.eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = 0;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = umem_dbrec;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
	if (!wq->cq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
		goto error;
	}
	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
	wq->cq_ci = 0;
	wq->arm_sn = 0;
	/* Mark all CQEs initially as invalid. */
	mlx5_txpp_fill_cqe_rearm_queue(sh);
	/*
	 * Allocate memory buffer for Send Queue WQEs.
	 * There should be no WQE leftovers in the cyclic queue.
	 */
	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	umem_size = MLX5_WQE_SIZE * wq->sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->sq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->sq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
		goto error;
	}
	/* Create send queue object for Rearm Queue. */
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.tis_lst_sz = 1;
	sq_attr.tis_num = sh->tis->id;
	sq_attr.cqn = wq->cq->id;
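	/* The Rearm Queue is the command master driving the Clock Queue. */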
	sq_attr.cd_master = 1;
	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = umem_dbrec;
	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	sq_attr.wq_attr.wq_umem_offset = 0;
	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
	if (!wq->sq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
		goto error;
	}
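	/* The doorbell record is an rcv/snd counter pair, use the send one. */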
	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
				   MLX5_SND_DBR * sizeof(uint32_t));
	/* Build the WQEs in the Send Queue before going to the Ready state. */
	mlx5_txpp_fill_wqe_rearm_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_rearm_queue(sh);
	rte_errno = -ret;
	return ret;
}

static void
mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
	struct mlx5_wqe_cseg *cs = &wqe->cseg;
	uint32_t wqe_size, opcode, i;
	uint8_t *dst;

	/* For test purposes fill the WQ with an inline SEND packet. */
	if (sh->txpp.test) {
		wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
				     MLX5_WQE_CSEG_SIZE +
				     2 * MLX5_WQE_ESEG_SIZE -
				     MLX5_ESEG_MIN_INLINE_SIZE,
				     MLX5_WSEG_SIZE);
		opcode = MLX5_OPCODE_SEND;
	} else {
		wqe_size = MLX5_WSEG_SIZE;
		opcode = MLX5_OPCODE_NOP;
	}
	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
	cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
				     (wqe_size / MLX5_WSEG_SIZE));
	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
	if (sh->txpp.test) {
		struct mlx5_wqe_eseg *es = &wqe->eseg;
		struct rte_ether_hdr *eth_hdr;
		struct rte_ipv4_hdr *ip_hdr;
		struct rte_udp_hdr *udp_hdr;

		/* Build the inline test packet pattern. */
		MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
		MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
				(sizeof(struct rte_ether_hdr) +
				 sizeof(struct rte_ipv4_hdr)));
		es->flags = 0;
		es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		es->swp_offs = 0;
		es->metadata = 0;
		es->swp_flags = 0;
		es->mss = 0;
		es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
		/* Build test packet L2 header (Ethernet). */
		dst = (uint8_t *)&es->inline_data;
		eth_hdr = (struct rte_ether_hdr *)dst;
		rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
		rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		/* Build test packet L3 header (IPv4). */
		dst += sizeof(struct rte_ether_hdr);
		ip_hdr = (struct rte_ipv4_hdr *)dst;
		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = 64;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
						sizeof(struct rte_ether_hdr));
		/* Use RFC 5735 / RFC 2544 reserved network test addresses. */
		ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 1);
		ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 2);
		if (MLX5_TXPP_TEST_PKT_SIZE <
					(sizeof(struct rte_ether_hdr) +
					 sizeof(struct rte_ipv4_hdr) +
					 sizeof(struct rte_udp_hdr)))
			goto wcopy;
		/* Build test packet L4 header (UDP). */
		dst += sizeof(struct rte_ipv4_hdr);
		udp_hdr = (struct rte_udp_hdr *)dst;
		udp_hdr->src_port = RTE_BE16(9); /* RFC 863 Discard. */
		udp_hdr->dst_port = RTE_BE16(9);
		udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
					      sizeof(struct rte_ether_hdr) -
					      sizeof(struct rte_ipv4_hdr));
		udp_hdr->dgram_cksum = 0;
		/* Fill the test packet data. */
		dst += sizeof(struct rte_udp_hdr);
		for (i = sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr) +
			sizeof(struct rte_udp_hdr);
				i < MLX5_TXPP_TEST_PKT_SIZE; i++)
			*dst++ = (uint8_t)(i & 0xFF);
	}
wcopy:
	/* Duplicate the pattern to the next WQEs. */
	dst = (uint8_t *)(uintptr_t)wq->sq_buf;
	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
		dst += wqe_size;
		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
	}
}

/* Creates the Clock Queue for packet pacing, returns zero on success. */
static int
mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	size_t page_size = sysconf(_SC_PAGESIZE);
	uint32_t umem_size, umem_dbrec;
	int ret;

	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->cq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
		return -ENOMEM;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->cq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
		goto error;
	}
	/* Create completion queue object for Clock Queue. */
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
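	/*
	 * The Clock Queue CQ is collapsed (use_first_only): hardware keeps
	 * rewriting the first CQE and overruns are ignored, only the most
	 * recent completion is of interest.
	 */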
	cq_attr.use_first_only = 1;
	cq_attr.overrun_ignore = 1;
	cq_attr.uar_page_id = sh->tx_uar->page_id;
	cq_attr.eqn = sh->txpp.eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = 0;
	cq_attr.q_umem_id = wq->cq_umem->umem_id;
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = umem_dbrec;
	cq_attr.db_umem_id = wq->cq_umem->umem_id;
	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
	if (!wq->cq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
		goto error;
	}
	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
	wq->cq_ci = 0;
	/* Allocate memory buffer for Send Queue WQEs. */
	if (sh->txpp.test) {
		wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
					MLX5_WQE_CSEG_SIZE +
					2 * MLX5_WQE_ESEG_SIZE -
					MLX5_ESEG_MIN_INLINE_SIZE,
					MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
		wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
	} else {
		wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
	}
	/* There should not be WQE leftovers in the cyclic queue. */
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	umem_size = MLX5_WQE_SIZE * wq->sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->sq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->sq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
		goto error;
	}
	/* Create send queue object for Clock Queue. */
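	/*
	 * In test mode the Clock Queue sends the prebuilt test packets to
	 * the wire via TIS, otherwise it only executes NOPs (non_wire).
	 * In both cases the WQE ring content is static.
	 */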
	if (sh->txpp.test) {
		sq_attr.tis_lst_sz = 1;
		sq_attr.tis_num = sh->tis->id;
		sq_attr.non_wire = 0;
		sq_attr.static_sq_wq = 1;
	} else {
		sq_attr.non_wire = 1;
		sq_attr.static_sq_wq = 1;
	}
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.cqn = wq->cq->id;
	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
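	/* The Clock Queue is the command slave driven by the Rearm Queue. */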
	sq_attr.wq_attr.cd_slave = 1;
	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = umem_dbrec;
	sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
	/* umem_offset must be zero for static_sq_wq queue. */
	sq_attr.wq_attr.wq_umem_offset = 0;
	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
	if (!wq->sq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
		goto error;
	}
	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
				   MLX5_SND_DBR * sizeof(uint32_t));
	/* Build the WQEs in the Send Queue before going to the Ready state. */
	mlx5_txpp_fill_wqe_clock_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	wq->sq_ci = 0;
	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_clock_queue(sh);
	rte_errno = -ret;
	return ret;
}

/*
 * The routine initializes the packet pacing infrastructure:
 * - allocates the PP context
 * - creates the Clock CQ/SQ
 * - creates the Rearm CQ/SQ
 * - attaches the rearm interrupt handler
 * - starts the Clock Queue
 *
 * Returns 0 on success, negative otherwise.
 */
static int
mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
{
	int tx_pp = priv->config.tx_pp;
	int ret;

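	/*
	 * The tx_pp devarg gives the pacing granularity in nanoseconds;
	 * a negative value requests the same granularity in test mode,
	 * where the Clock Queue sends test packets to the wire.
	 */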
	/* Store the requested pacing parameters. */
	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
	sh->txpp.test = !!(tx_pp < 0);
	sh->txpp.skew = priv->config.tx_skew;
	sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
	ret = mlx5_txpp_create_eqn(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_alloc_pp_index(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_clock_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_rearm_queue(sh);
	if (ret)
		goto exit;
exit:
	if (ret) {
		mlx5_txpp_destroy_rearm_queue(sh);
		mlx5_txpp_destroy_clock_queue(sh);
		mlx5_txpp_free_pp_index(sh);
		mlx5_txpp_destroy_eqn(sh);
		sh->txpp.tick = 0;
		sh->txpp.test = 0;
		sh->txpp.skew = 0;
	}
	return ret;
}

/*
 * The routine destroys the packet pacing infrastructure:
 * - detaches the rearm interrupt handler
 * - destroys the Rearm CQ/SQ
 * - destroys the Clock CQ/SQ
 * - frees the PP context
 */
static void
mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_destroy_rearm_queue(sh);
	mlx5_txpp_destroy_clock_queue(sh);
	mlx5_txpp_free_pp_index(sh);
	mlx5_txpp_destroy_eqn(sh);
	sh->txpp.tick = 0;
	sh->txpp.test = 0;
	sh->txpp.skew = 0;
}

/**
 * Creates and starts packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txpp_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;
	int ret;

	if (!priv->config.tx_pp) {
		/* Packet pacing is not requested for the device. */
		MLX5_ASSERT(priv->txpp_en == 0);
		return 0;
	}
	if (priv->txpp_en) {
		/* Packet pacing is already enabled for the device. */
		MLX5_ASSERT(sh->txpp.refcnt);
		return 0;
	}
	if (priv->config.tx_pp > 0) {
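		/*
		 * Scheduling on timestamps needs the Tx timestamp dynamic
		 * flag registered by the application; silently skip the
		 * pacing setup if the flag is not found.
		 */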
		ret = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
		if (ret < 0)
			return 0;
	}
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	if (sh->txpp.refcnt) {
		priv->txpp_en = 1;
		++sh->txpp.refcnt;
	} else {
		err = mlx5_txpp_create(sh, priv);
		if (!err) {
			MLX5_ASSERT(sh->txpp.tick);
			priv->txpp_en = 1;
			sh->txpp.refcnt = 1;
		} else {
			rte_errno = -err;
		}
	}
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	return err;
}

/**
 * Stops and destroys packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_txpp_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (!priv->txpp_en) {
		/* Packet pacing is already disabled for the device. */
		return;
	}
	priv->txpp_en = 0;
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	MLX5_ASSERT(sh->txpp.refcnt);
	if (!sh->txpp.refcnt || --sh->txpp.refcnt) {
		/* Still referenced, just release the lock. */
		ret = pthread_mutex_unlock(&sh->txpp.mutex);
		MLX5_ASSERT(!ret);
		RTE_SET_USED(ret);
		return;
	}
	/* No references any more, do actual destroy. */
	mlx5_txpp_destroy(sh);
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
}
718