/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <fcntl.h>
#include <stdint.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_common_os.h"

static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
		"Wrong timestamp CQE part size");

static const char * const mlx5_txpp_stat_names[] = {
	"tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
	"tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
	"tx_pp_clock_queue_errors", /* Clock Queue errors. */
	"tx_pp_timestamp_past_errors", /* Timestamp in the past. */
	"tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
	"tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
	"tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
	"tx_pp_sync_lost", /* Scheduling synchronization lost. */
};
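
/*
 * Note: the order of the names above must match the fill order in
 * mlx5_txpp_xstats_get() at the bottom of this file, which stores the
 * five error counters, the jitter, the wander and the sync_lost flag
 * at these fixed offsets.
 */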

/* Destroy Event Queue Notification Channel. */
static void
mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->txpp.echan) {
		mlx5_os_devx_destroy_event_channel(sh->txpp.echan);
		sh->txpp.echan = NULL;
	}
}

/* Create Event Queue Notification Channel. */
static int
mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	MLX5_ASSERT(!sh->txpp.echan);
	sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->cdev->ctx,
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!sh->txpp.echan) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
		return -rte_errno;
	}
	return 0;
}

static void
mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	if (sh->txpp.pp) {
		mlx5_glue->dv_free_pp(sh->txpp.pp);
		sh->txpp.pp = NULL;
		sh->txpp.pp_id = 0;
	}
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Freeing pacing index is not supported.");
#endif
}

/* Allocate Packet Pacing index from kernel via mlx5dv call. */
static int
mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
	uint64_t rate;

	MLX5_ASSERT(!sh->txpp.pp);
	memset(&pp, 0, sizeof(pp));
	rate = NS_PER_S / sh->txpp.tick;
	if (rate * sh->txpp.tick != NS_PER_S)
		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
	if (sh->txpp.test) {
		uint32_t len;

		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
			      (size_t)RTE_ETHER_MIN_LEN);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 burst_upper_bound, len);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 typical_packet_size, len);
		/* Convert packets per second into kilobits per second. */
		rate = (rate * len) / (1000ul / CHAR_BIT);
		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
	}
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
	sh->txpp.pp = mlx5_glue->dv_alloc_pp
				(sh->cdev->ctx, sizeof(pp), &pp,
				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (sh->txpp.pp == NULL) {
		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
		rte_errno = errno;
		return -errno;
	}
	if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
		DRV_LOG(ERR, "Zero packet pacing index allocated.");
		mlx5_txpp_free_pp_index(sh);
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
	return 0;
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Allocating pacing index is not supported.");
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
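
/*
 * Worked example for the rate conversion above (illustrative figures,
 * not taken from any particular device): tx_pp = -500 gives a 500 ns
 * tick, hence rate = 10^9 / 500 = 2000000 packets per second. In test
 * mode with len = 64 bytes this becomes (2000000 * 64) / 125 =
 * 1024000 kbits per second, the unit the MLX5_DATA_RATE mode expects.
 */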

static void
mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
{
	mlx5_devx_sq_destroy(&wq->sq_obj);
	mlx5_devx_cq_destroy(&wq->cq_obj);
	memset(wq, 0, sizeof(*wq));
}

static void
mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;

	mlx5_txpp_destroy_send_queue(wq);
	if (sh->txpp.tsa) {
		mlx5_free(sh->txpp.tsa);
		sh->txpp.tsa = NULL;
	}
}

static void
mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	union {
		uint32_t w32[2];
		uint64_t w64;
	} cs;
	void *reg_addr;

	wq->sq_ci = ci + 1;
	cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
			(wqe[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
	cs.w32[1] = wqe[ci & (wq->sq_size - 1)].ctrl[1];
	/* Update SQ doorbell record with new SQ ci. */
	rte_compiler_barrier();
	*wq->sq_obj.db_rec = rte_cpu_to_be_32(wq->sq_ci);
	/* Make sure the doorbell record is updated. */
	rte_wmb();
	/* Write to doorbell register to start processing. */
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	__mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
	rte_wmb();
}
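
/*
 * Note on the doorbell write above: the 64-bit value pushed to the UAR
 * register is the first two control segment dwords of the rung WQE,
 * with the previous WQE index patched into the first dword, following
 * the regular mlx5 Tx doorbell protocol.
 */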

static void
mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	uint32_t i;

	for (i = 0; i < wq->sq_size; i += 2) {
		struct mlx5_wqe_cseg *cs;
		struct mlx5_wqe_qseg *qs;
		uint32_t index;

		/* Build SEND_EN request with slave WQE index. */
		cs = &wqe[i + 0].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn =
			   rte_cpu_to_be_32(sh->txpp.clock_queue.sq_obj.sq->id);
		/* Build WAIT request with slave CQE index. */
		cs = &wqe[i + 1].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn =
			   rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);
	}
}
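
/*
 * Summary of the pattern built above: the WQEs are installed in pairs.
 * For pair k the SEND_EN enables the Clock Queue up to WQE index
 * (k + 1) * MLX5_TXPP_REARM, while the WAIT stalls the Rearm Queue
 * until Clock Queue CQE index k * MLX5_TXPP_REARM + MLX5_TXPP_REARM / 2
 * arrives, keeping the Clock Queue enabled ahead of its completions by
 * at least one MLX5_TXPP_REARM burst.
 */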

/* Creates the Rearm Queue to fire requests to the Clock Queue in real time. */
static int
mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = {
		.cd_master = 1,
		.state = MLX5_SQC_STATE_RST,
		.tis_lst_sz = 1,
		.tis_num = sh->tis[0]->id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = sh->cdev->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
		},
		.ts_format = mlx5_ts_format_conv
				       (sh->cdev->config.hca_attr.sq_ts_format),
	};
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	int ret;

	/* Create completion queue object for Rearm Queue. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
				  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
				  sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
		return ret;
	}
	wq->cq_ci = 0;
	wq->arm_sn = 0;
	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	/* Create send queue object for Rearm Queue. */
	sq_attr.cqn = wq->cq_obj.cq->id;
	/* There should be no WQE leftovers in the cyclic queue. */
	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
				  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
				  sh->numa_node);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
		goto error;
	}
	/* Build the WQEs in the Send Queue before going to the Ready state. */
	mlx5_txpp_fill_wqe_rearm_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state for Rearm Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_rearm_queue(sh);
	rte_errno = -ret;
	return ret;
}

static void
mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	struct mlx5_wqe_cseg *cs = &wqe->cseg;
	uint32_t wqe_size, opcode, i;
	uint8_t *dst;

	/* For test purposes fill the WQ with a SEND inline packet. */
	if (sh->txpp.test) {
		wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
				     MLX5_WQE_CSEG_SIZE +
				     2 * MLX5_WQE_ESEG_SIZE -
				     MLX5_ESEG_MIN_INLINE_SIZE,
				     MLX5_WSEG_SIZE);
		opcode = MLX5_OPCODE_SEND;
	} else {
		wqe_size = MLX5_WSEG_SIZE;
		opcode = MLX5_OPCODE_NOP;
	}
	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
	cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) |
				     (wqe_size / MLX5_WSEG_SIZE));
	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
	if (sh->txpp.test) {
		struct mlx5_wqe_eseg *es = &wqe->eseg;
		struct rte_ether_hdr *eth_hdr;
		struct rte_ipv4_hdr *ip_hdr;
		struct rte_udp_hdr *udp_hdr;

		/* Build the inline test packet pattern. */
		MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
		MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
				(sizeof(struct rte_ether_hdr) +
				 sizeof(struct rte_ipv4_hdr)));
		es->flags = 0;
		es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		es->swp_offs = 0;
		es->metadata = 0;
		es->swp_flags = 0;
		es->mss = 0;
		es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
		/* Build test packet L2 header (Ethernet). */
		dst = (uint8_t *)&es->inline_data;
		eth_hdr = (struct rte_ether_hdr *)dst;
		rte_eth_random_addr(&eth_hdr->dst_addr.addr_bytes[0]);
		rte_eth_random_addr(&eth_hdr->src_addr.addr_bytes[0]);
		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		/* Build test packet L3 header (IPv4). */
		dst += sizeof(struct rte_ether_hdr);
		ip_hdr = (struct rte_ipv4_hdr *)dst;
		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = 64;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
						sizeof(struct rte_ether_hdr));
		/* Use RFC 5735 / RFC 2544 reserved network test addresses. */
		ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 1);
		ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 2);
		if (MLX5_TXPP_TEST_PKT_SIZE <
					(sizeof(struct rte_ether_hdr) +
					 sizeof(struct rte_ipv4_hdr) +
					 sizeof(struct rte_udp_hdr)))
			goto wcopy;
		/* Build test packet L4 header (UDP). */
		dst += sizeof(struct rte_ipv4_hdr);
		udp_hdr = (struct rte_udp_hdr *)dst;
		udp_hdr->src_port = RTE_BE16(9); /* RFC 863 Discard. */
		udp_hdr->dst_port = RTE_BE16(9);
		udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
					      sizeof(struct rte_ether_hdr) -
					      sizeof(struct rte_ipv4_hdr));
		udp_hdr->dgram_cksum = 0;
		/* Fill the test packet data. */
		dst += sizeof(struct rte_udp_hdr);
		for (i = sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr) +
			sizeof(struct rte_udp_hdr);
				i < MLX5_TXPP_TEST_PKT_SIZE; i++)
			*dst++ = (uint8_t)(i & 0xFF);
	}
wcopy:
	/* Duplicate the pattern to the next WQEs. */
	dst = (uint8_t *)(uintptr_t)wq->sq_obj.umem_buf;
	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
		dst += wqe_size;
		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_obj.umem_buf,
			   wqe_size);
	}
}

/* Creates the Clock Queue for packet pacing, returns zero on success. */
static int
mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = {
		.use_first_only = 1,
		.overrun_ignore = 1,
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	int ret;

	sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				   MLX5_TXPP_REARM_SQ_SIZE *
				   sizeof(struct mlx5_txpp_ts),
				   0, sh->numa_node);
	if (!sh->txpp.tsa) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
		return -ENOMEM;
	}
	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	/* Create completion queue object for Clock Queue. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
				  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
				  sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
		goto error;
	}
	wq->cq_ci = 0;
	/* Allocate memory buffer for Send Queue WQEs. */
	if (sh->txpp.test) {
		wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
					MLX5_WQE_CSEG_SIZE +
					2 * MLX5_WQE_ESEG_SIZE -
					MLX5_ESEG_MIN_INLINE_SIZE,
					MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
		wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
	} else {
		wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
	}
	/* There should not be WQE leftovers in the cyclic queue. */
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	/* Create send queue object for Clock Queue. */
	if (sh->txpp.test) {
		sq_attr.tis_lst_sz = 1;
		sq_attr.tis_num = sh->tis[0]->id;
		sq_attr.non_wire = 0;
		sq_attr.static_sq_wq = 1;
	} else {
		sq_attr.non_wire = 1;
		sq_attr.static_sq_wq = 1;
	}
	sq_attr.cqn = wq->cq_obj.cq->id;
	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
	sq_attr.wq_attr.cd_slave = 1;
	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
	sq_attr.wq_attr.pd = sh->cdev->pdn;
	sq_attr.ts_format =
		mlx5_ts_format_conv(sh->cdev->config.hca_attr.sq_ts_format);
	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
				  log2above(wq->sq_size),
				  &sq_attr, sh->numa_node);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
		goto error;
	}
	/* Build the WQEs in the Send Queue before going to the Ready state. */
	mlx5_txpp_fill_wqe_clock_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	wq->sq_ci = 0;
	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state for Clock Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_clock_queue(sh);
	rte_errno = -ret;
	return ret;
}
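
/*
 * The Clock Queue CQ above is created with use_first_only and
 * overrun_ignore set, so the hardware continuously rewrites a single
 * CQE with the latest timestamp and WQE counter instead of producing
 * a CQE stream; this is why readers must fetch that CQE with
 * mlx5_atomic_read_cqe() below.
 */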

/* Enable notification from the Rearm Queue CQ. */
static inline void
mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
{
	void *base_addr;

	struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
	uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
	uint64_t db_be =
		rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);
	base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
	uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);

	rte_compiler_barrier();
	aq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	aq->arm_sn++;
}
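
/*
 * The arm request above packs the arm sequence number, the command and
 * the consumer index into db_hi, stores it in the CQ doorbell record,
 * then writes it together with the CQ number to the UAR doorbell
 * register; bumping arm_sn afterwards lets the hardware tell each
 * re-arm apart from the previous one.
 */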

#if defined(RTE_ARCH_X86_64)
static inline int
mlx5_atomic128_compare_exchange(rte_int128_t *dst,
				rte_int128_t *exp,
				const rte_int128_t *src)
{
	uint8_t res;

	asm volatile (MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]),
			"=d" (exp->val[1]),
			[res] "=r" (res)
		      : "b" (src->val[0]),
			"c" (src->val[1]),
			"a" (exp->val[0]),
			"d" (exp->val[1]),
			"m" (dst->val[0])
		      : "memory");

	return res;
}
#endif

static inline void
mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
{
	/*
	 * The only CQE of Clock Queue is being continuously
	 * updated by hardware with specified rate. We must
	 * read timestamp and WQE completion index atomically.
	 */
#if defined(RTE_ARCH_X86_64)
	rte_int128_t src;

	memset(&src, 0, sizeof(src));
	*ts = src;
	/* if (*from == *ts) *from = *src; else *ts = *from; */
	mlx5_atomic128_compare_exchange(from, ts, &src);
#else
	uint64_t *cqe = (uint64_t *)from;

	/*
	 * Power architecture does not support 16B compare-and-swap.
	 * ARM implements it in software, so the read-and-verify loop
	 * below is more suitable there.
	 */
	for (;;) {
		uint64_t tm, op;
		uint64_t *ps;

		rte_compiler_barrier();
		tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
		op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
			continue;
		if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
			continue;
		ps = (uint64_t *)ts;
		ps[0] = tm;
		ps[1] = op;
		return;
	}
#endif
}
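
/*
 * On x86-64 the 128-bit read above is emulated with cmpxchg16b using
 * zero for both the expected and the source operand: a match rewrites
 * the CQE with the same zeroes (no visible change), while a mismatch
 * deposits the current 128-bit CQE contents into *ts, which is exactly
 * the atomic snapshot we need.
 */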

/* Stores timestamp in the cache structure to share data with the datapath. */
static inline void
mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
			   uint64_t ts, uint64_t ci)
{
	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
	rte_compiler_barrier();
	__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
	rte_wmb();
}
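
/*
 * Layout of the cached ci_ts word produced above: the CQE counter
 * occupies the top MLX5_CQ_INDEX_WIDTH bits and the truncated
 * timestamp fills the remaining low bits. A reader fetching both ts
 * and ci_ts can therefore detect a torn update by checking that
 * (ci_ts ^ ts) << MLX5_CQ_INDEX_WIDTH is zero, which is exactly what
 * mlx5_txpp_read_tsa() does below.
 */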

/* Reads timestamp from Clock Queue CQE and stores in the cache. */
static inline void
mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
	union {
		rte_int128_t u128;
		struct mlx5_cqe_ts cts;
	} to;
	uint64_t ts;
	uint16_t ci;
	uint8_t opcode;

	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
	opcode = MLX5_CQE_OPCODE(to.cts.op_own);
	if (opcode) {
		if (opcode != MLX5_CQE_INVALID) {
			/*
			 * Commit the error state if and only if
			 * we have got at least one actual completion.
			 */
			DRV_LOG(DEBUG,
				"Clock Queue error sync lost (%X).", opcode);
			__atomic_fetch_add(&sh->txpp.err_clock_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
		}
		return;
	}
	ci = rte_be_to_cpu_16(to.cts.wqe_counter);
	ts = rte_be_to_cpu_64(to.cts.timestamp);
	ts = mlx5_txpp_convert_rx_ts(sh, ts);
	wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
	wq->sq_ci = ci;
	mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
}

/* Waits for the first completion on Clock Queue to init timestamp. */
static inline void
mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	uint32_t wait;

	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
		mlx5_txpp_update_timestamp(sh);
		if (wq->sq_ci)
			return;
		/* Wait one millisecond and try again. */
		rte_delay_us_sleep(US_PER_S / MS_PER_S);
	}
	DRV_LOG(ERR, "Unable to initialize timestamp.");
	sh->txpp.sync_lost = 1;
}

#ifdef HAVE_IBV_DEVX_EVENT
/* Gather statistics for timestamp from Clock Queue CQE. */
static inline void
mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	/* Check whether we have a valid timestamp. */
	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
		return;
	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
			 sh->txpp.ts.ts, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
			 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
		sh->txpp.ts_p = 0;
	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
		++sh->txpp.ts_n;
}

/* Handles Rearm Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	uint32_t cq_ci = wq->cq_ci;
	bool error = false;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe;

		cqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
		ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
		switch (ret) {
		case MLX5_CQE_STATUS_ERR:
			error = true;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_SW_OWN:
			wq->sq_ci += 2;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_HW_OWN:
			break;
		default:
			MLX5_ASSERT(false);
			break;
		}
	} while (ret != MLX5_CQE_STATUS_HW_OWN);
	if (likely(cq_ci != wq->cq_ci)) {
		/* Check whether we have missed interrupts. */
		if (cq_ci - wq->cq_ci != 1) {
			DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
			__atomic_fetch_add(&sh->txpp.err_miss_int,
					   1, __ATOMIC_RELAXED);
			/* Check sync lost on wqe index. */
			if (cq_ci - wq->cq_ci >=
				(((1UL << MLX5_WQ_INDEX_WIDTH) /
				  MLX5_TXPP_REARM) - 1))
				error = true;
		}
		/* Update doorbell record to notify hardware. */
		rte_compiler_barrier();
		*wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);
		rte_wmb();
		wq->cq_ci = cq_ci;
		/* Fire new requests to Rearm Queue. */
		if (error) {
			DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
			__atomic_fetch_add(&sh->txpp.err_rearm_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
		}
	}
}
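
/*
 * The sync-lost test above is a wrap check: each Rearm Queue
 * completion accounts for MLX5_TXPP_REARM Clock Queue WQEs, so once
 * the number of missed completions approaches
 * (1 << MLX5_WQ_INDEX_WIDTH) / MLX5_TXPP_REARM the hardware WQE index
 * space may have wrapped and the queue states can no longer be
 * trusted.
 */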

/* Handles Clock Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_update_timestamp(sh);
	mlx5_txpp_gather_timestamp(sh);
}
#endif

/* Invoked periodically on Rearm Queue completions. */
void
mlx5_txpp_interrupt_handler(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_EVENT
	RTE_SET_USED(cb_arg);
	return;
#else
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Process events in the loop. Only rearm completions are expected. */
	while (mlx5_glue->devx_get_event
				(sh->txpp.echan,
				 &out.event_resp,
				 sizeof(out.buf)) >=
				 (ssize_t)sizeof(out.event_resp.cookie)) {
		mlx5_txpp_handle_rearm_queue(sh);
		mlx5_txpp_handle_clock_queue(sh);
		mlx5_txpp_cq_arm(sh);
		mlx5_txpp_doorbell_rearm_queue
					(sh, sh->txpp.rearm_queue.sq_ci - 1);
	}
#endif /* HAVE_IBV_DEVX_EVENT */
}

static void
mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
{
	if (!rte_intr_fd_get(sh->txpp.intr_handle))
		return;
	mlx5_intr_callback_unregister(sh->txpp.intr_handle,
				      mlx5_txpp_interrupt_handler, sh);
	rte_intr_instance_free(sh->txpp.intr_handle);
}

/* Attach interrupt handler and fire the first request to Rearm Queue. */
static int
mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
{
	uint16_t event_nums[1] = {0};
	int ret;
	int fd;

	sh->txpp.err_miss_int = 0;
	sh->txpp.err_rearm_queue = 0;
	sh->txpp.err_clock_queue = 0;
	sh->txpp.err_ts_past = 0;
	sh->txpp.err_ts_future = 0;
	/* Attach interrupt handler to process Rearm Queue completions. */
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	ret = mlx5_os_set_nonblock_channel_fd(fd);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		rte_errno = errno;
		return -rte_errno;
	}
	sh->txpp.intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (sh->txpp.intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		return -ENOMEM;
	}
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	if (rte_intr_fd_set(sh->txpp.intr_handle, fd))
		return -rte_errno;

	if (rte_intr_type_set(sh->txpp.intr_handle, RTE_INTR_HANDLE_EXT))
		return -rte_errno;

	if (rte_intr_callback_register(sh->txpp.intr_handle,
				       mlx5_txpp_interrupt_handler, sh)) {
		rte_intr_fd_set(sh->txpp.intr_handle, 0);
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		return -rte_errno;
	}
	/* Subscribe the CQ event to the event channel controlled by the driver. */
	ret = mlx5_os_devx_subscribe_devx_event(sh->txpp.echan,
					    sh->txpp.rearm_queue.cq_obj.cq->obj,
					     sizeof(event_nums), event_nums, 0);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		return -errno;
	}
	/* Enable interrupts in the CQ. */
	mlx5_txpp_cq_arm(sh);
	/* Fire the first request on Rearm Queue. */
	mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
	mlx5_txpp_init_timestamp(sh);
	return 0;
}

/*
 * The routine initializes the packet pacing infrastructure:
 * - allocates PP context
 * - creates Clock CQ/SQ
 * - creates Rearm CQ/SQ
 * - attaches rearm interrupt handler
 * - starts Clock Queue
 *
 * Returns 0 on success, negative otherwise.
 */
static int
mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
{
	int tx_pp = priv->config.tx_pp;
	int ret;

	/* Store the requested pacing parameters. */
	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
	sh->txpp.test = !!(tx_pp < 0);
	sh->txpp.skew = priv->config.tx_skew;
	sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
	ret = mlx5_txpp_create_event_channel(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_alloc_pp_index(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_clock_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_rearm_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_start_service(sh);
	if (ret)
		goto exit;
exit:
	if (ret) {
		mlx5_txpp_stop_service(sh);
		mlx5_txpp_destroy_rearm_queue(sh);
		mlx5_txpp_destroy_clock_queue(sh);
		mlx5_txpp_free_pp_index(sh);
		mlx5_txpp_destroy_event_channel(sh);
		sh->txpp.tick = 0;
		sh->txpp.test = 0;
		sh->txpp.skew = 0;
	}
	return ret;
}

/*
 * The routine destroys the packet pacing infrastructure:
 * - detaches rearm interrupt handler
 * - destroys Rearm CQ/SQ
 * - destroys Clock CQ/SQ
 * - frees PP context
 */
static void
mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_stop_service(sh);
	mlx5_txpp_destroy_rearm_queue(sh);
	mlx5_txpp_destroy_clock_queue(sh);
	mlx5_txpp_free_pp_index(sh);
	mlx5_txpp_destroy_event_channel(sh);
	sh->txpp.tick = 0;
	sh->txpp.test = 0;
	sh->txpp.skew = 0;
}

/**
 * Creates and starts packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txpp_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;
	int ret;

	if (!priv->config.tx_pp) {
		/* Packet pacing is not requested for the device. */
		MLX5_ASSERT(priv->txpp_en == 0);
		return 0;
	}
	if (priv->txpp_en) {
		/* Packet pacing is already enabled for the device. */
		MLX5_ASSERT(sh->txpp.refcnt);
		return 0;
	}
	if (priv->config.tx_pp > 0) {
		ret = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
		if (ret < 0)
			return 0;
	}
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	if (sh->txpp.refcnt) {
		priv->txpp_en = 1;
		++sh->txpp.refcnt;
	} else {
		err = mlx5_txpp_create(sh, priv);
		if (!err) {
			MLX5_ASSERT(sh->txpp.tick);
			priv->txpp_en = 1;
			sh->txpp.refcnt = 1;
		} else {
			rte_errno = -err;
		}
	}
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	return err;
}

/**
 * Stops and destroys packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_txpp_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (!priv->txpp_en) {
		/* Packet pacing is already disabled for the device. */
		return;
	}
	priv->txpp_en = 0;
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	MLX5_ASSERT(sh->txpp.refcnt);
	if (!sh->txpp.refcnt || --sh->txpp.refcnt) {
		/* Still referenced; release the mutex before returning. */
		ret = pthread_mutex_unlock(&sh->txpp.mutex);
		MLX5_ASSERT(!ret);
		RTE_SET_USED(ret);
		return;
	}
	/* No references any more, do actual destroy. */
	mlx5_txpp_destroy(sh);
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
}

/*
 * Read the current clock counter of an Ethernet device.
 *
 * This returns the current raw clock value of an Ethernet device. It is
 * a raw amount of ticks, with no given time reference.
 * The value returned here is from the same clock as the one filling the
 * timestamp field of Rx/Tx packets when using hardware timestamp offload.
 * Therefore it can be used to compute a precise conversion of the device
 * clock to real time.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param timestamp
 *   Pointer to the uint64_t that holds the raw clock value.
 *
 * @return
 *   - 0: Success.
 *   - -ENOTSUP: The function is not supported in this mode. Requires
 *     packet pacing module configured and started (tx_pp devarg).
 */
int
mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (sh->txpp.refcnt) {
		struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
		struct mlx5_cqe *cqe =
				(struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
		union {
			rte_int128_t u128;
			struct mlx5_cqe_ts cts;
		} to;
		uint64_t ts;

		mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
		if (to.cts.op_own >> 4) {
			DRV_LOG(DEBUG, "Clock Queue error sync lost.");
			__atomic_fetch_add(&sh->txpp.err_clock_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
			return -EIO;
		}
		ts = rte_be_to_cpu_64(to.cts.timestamp);
		ts = mlx5_txpp_convert_rx_ts(sh, ts);
		*timestamp = ts;
		return 0;
	}
	/* Not supported in isolated mode - kernel does not see the CQEs. */
	if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;
	ret = mlx5_read_clock(dev, timestamp);
	return ret;
}

/**
 * DPDK callback to clear device extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success and stats is reset, negative errno value otherwise and
 *   rte_errno is set.
 */
int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
	return 0;
}

/**
 * Routine to retrieve names of extended device statistics
 * for packet send scheduling. It appends the specific stats names
 * after the parts filled by preceding modules (eth stats, etc.)
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] xstats_names
 *   Buffer to insert names into.
 * @param n
 *   Number of names.
 * @param n_used
 *   Number of names filled by preceding statistics modules.
 *
 * @return
 *   Number of xstats names.
 */
int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int n, unsigned int n_used)
{
	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
	unsigned int i;

	if (n >= n_used + n_txpp && xstats_names) {
		for (i = 0; i < n_txpp; ++i) {
			strncpy(xstats_names[i + n_used].name,
				mlx5_txpp_stat_names[i],
				RTE_ETH_XSTATS_NAME_SIZE);
			xstats_names[i + n_used].name
					[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
		}
	}
	return n_used + n_txpp;
}

static inline void
mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
		   struct mlx5_txpp_ts *tsa, uint16_t idx)
{
	do {
		uint64_t ts, ci;

		ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
		ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
			continue;
		if (__atomic_load_n(&txpp->tsa[idx].ts,
				    __ATOMIC_RELAXED) != ts)
			continue;
		if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
				    __ATOMIC_RELAXED) != ci)
			continue;
		tsa->ts = ts;
		tsa->ci_ts = ci;
		return;
	} while (true);
}

/*
 * Jitter reflects the clock change between
 * neighbouring Clock Queue completions.
 */
static uint64_t
mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
{
	struct mlx5_txpp_ts tsa0, tsa1;
	int64_t dts, dci;
	uint16_t ts_p;

	if (txpp->ts_n < 2) {
		/* Not enough reports gathered yet. */
		return 0;
	}
	do {
		int ts_0, ts_1;

		ts_p = txpp->ts_p;
		rte_compiler_barrier();
		ts_0 = ts_p - 2;
		if (ts_0 < 0)
			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
		ts_1 = ts_p - 1;
		if (ts_1 < 0)
			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two neighbouring reports, calculate the jitter. */
	dts = tsa1.ts - tsa0.ts;
	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	if (dci < 0)
		dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
	return (dts > dci) ? dts - dci : dci - dts;
}
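
/*
 * Illustrative numbers for the jitter formula above (hypothetical,
 * not measured): with tick = 500 ns, two neighbouring completions one
 * tick apart give dci = 1 * 500 = 500. If the captured timestamps
 * differ by dts = 512 ns, the reported jitter is |512 - 500| = 12 ns.
 */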

/*
 * Wander reflects the long-term clock change
 * over the entire length of all Clock Queue completions.
 */
static uint64_t
mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
{
	struct mlx5_txpp_ts tsa0, tsa1;
	int64_t dts, dci;
	uint16_t ts_p;

	if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
		/* Not enough reports gathered yet. */
		return 0;
	}
	do {
		int ts_0, ts_1;

		ts_p = txpp->ts_p;
		rte_compiler_barrier();
		ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
		if (ts_0 < 0)
			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
		ts_1 = ts_p - 1;
		if (ts_1 < 0)
			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two reports half a ring apart, calculate the wander. */
	dts = tsa1.ts - tsa0.ts;
	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
	return (dts > dci) ? dts - dci : dci - dts;
}

/**
 * Routine to retrieve extended device statistics
 * for packet send scheduling. It appends the specific statistics
 * after the parts filled by preceding modules (eth stats, etc.)
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] stats
 *   Pointer to rte extended stats table.
 * @param n
 *   The size of the stats table.
 * @param n_used
 *   Number of stats filled by preceding statistics modules.
 *
 * @return
 *   Number of extended stats on success and stats is filled,
 *   negative on error and rte_errno is set.
 */
int
mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *stats,
		     unsigned int n, unsigned int n_used)
{
	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);

	if (n >= n_used + n_txpp && stats) {
		struct mlx5_priv *priv = dev->data->dev_private;
		struct mlx5_dev_ctx_shared *sh = priv->sh;
		unsigned int i;

		for (i = 0; i < n_txpp; ++i)
			stats[n_used + i].id = n_used + i;
		stats[n_used + 0].value =
				__atomic_load_n(&sh->txpp.err_miss_int,
						__ATOMIC_RELAXED);
		stats[n_used + 1].value =
				__atomic_load_n(&sh->txpp.err_rearm_queue,
						__ATOMIC_RELAXED);
		stats[n_used + 2].value =
				__atomic_load_n(&sh->txpp.err_clock_queue,
						__ATOMIC_RELAXED);
		stats[n_used + 3].value =
				__atomic_load_n(&sh->txpp.err_ts_past,
						__ATOMIC_RELAXED);
		stats[n_used + 4].value =
				__atomic_load_n(&sh->txpp.err_ts_future,
						__ATOMIC_RELAXED);
		stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
		stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
		stats[n_used + 7].value = sh->txpp.sync_lost;
	}
	return n_used + n_txpp;
}