xref: /dpdk/drivers/net/mlx5/mlx5_txpp.c (revision f8dbaebbf1c9efcbb2e2354b341ed62175466a57)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  */
4 #include <fcntl.h>
5 #include <stdint.h>
6 
7 #include <rte_ether.h>
8 #include <ethdev_driver.h>
9 #include <rte_interrupts.h>
10 #include <rte_alarm.h>
11 #include <rte_malloc.h>
12 #include <rte_cycles.h>
13 #include <rte_eal_paging.h>
14 
15 #include <mlx5_malloc.h>
16 #include <mlx5_common_devx.h>
17 
18 #include "mlx5.h"
19 #include "mlx5_rx.h"
20 #include "mlx5_tx.h"
21 #include "mlx5_common_os.h"
22 
23 static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
24 		"Wrong timestamp CQE part size");
25 
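/*
 * Note: the order of these names must match the order in which
 * mlx5_txpp_xstats_get() fills the corresponding values below.
 */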
26 static const char * const mlx5_txpp_stat_names[] = {
27 	"tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
28 	"tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
29 	"tx_pp_clock_queue_errors", /* Clock Queue errors. */
30 	"tx_pp_timestamp_past_errors", /* Timestamp in the past. */
31 	"tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
32 	"tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
33 	"tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
34 	"tx_pp_sync_lost", /* Scheduling synchronization lost. */
35 };
36 
37 /* Destroy Event Queue Notification Channel. */
38 static void
39 mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
40 {
41 	if (sh->txpp.echan) {
42 		mlx5_os_devx_destroy_event_channel(sh->txpp.echan);
43 		sh->txpp.echan = NULL;
44 	}
45 }
46 
47 /* Create Event Queue Notification Channel. */
48 static int
49 mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
50 {
51 	MLX5_ASSERT(!sh->txpp.echan);
52 	sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->cdev->ctx,
53 			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
54 	if (!sh->txpp.echan) {
55 		rte_errno = errno;
56 		DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
57 		return -rte_errno;
58 	}
59 	return 0;
60 }
61 
62 static void
63 mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
64 {
65 #ifdef HAVE_MLX5DV_PP_ALLOC
66 	if (sh->txpp.pp) {
67 		mlx5_glue->dv_free_pp(sh->txpp.pp);
68 		sh->txpp.pp = NULL;
69 		sh->txpp.pp_id = 0;
70 	}
71 #else
72 	RTE_SET_USED(sh);
73 	DRV_LOG(ERR, "Freeing pacing index is not supported.");
74 #endif
75 }
76 
77 /* Allocate Packet Pacing index from kernel via mlx5dv call. */
78 static int
79 mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
80 {
81 #ifdef HAVE_MLX5DV_PP_ALLOC
82 	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
83 	uint64_t rate;
84 
85 	MLX5_ASSERT(!sh->txpp.pp);
86 	memset(&pp, 0, sizeof(pp));
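	/*
	 * The pacing tick is expressed in nanoseconds, hence one Clock
	 * Queue WQE completes every 'tick' ns and the requested WQE rate
	 * is NS_PER_S / tick completions per second.
	 */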
87 	rate = NS_PER_S / sh->txpp.tick;
88 	if (rate * sh->txpp.tick != NS_PER_S)
89 		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
90 	if (sh->txpp.test) {
91 		uint32_t len;
92 
93 		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
94 			      (size_t)RTE_ETHER_MIN_LEN);
95 		MLX5_SET(set_pp_rate_limit_context, &pp,
96 			 burst_upper_bound, len);
97 		MLX5_SET(set_pp_rate_limit_context, &pp,
98 			 typical_packet_size, len);
99 		/* Convert the packet rate into kilobits per second. */
100 		rate = (rate * len) / (1000ul / CHAR_BIT);
101 		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
102 	}
103 	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
104 	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
105 		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
106 	sh->txpp.pp = mlx5_glue->dv_alloc_pp
107 				(sh->cdev->ctx, sizeof(pp), &pp,
108 				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
109 	if (sh->txpp.pp == NULL) {
110 		rte_errno = errno;
111 		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
112 		return -rte_errno;
113 	}
114 	if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
115 		DRV_LOG(ERR, "Zero packet pacing index allocated.");
116 		mlx5_txpp_free_pp_index(sh);
117 		rte_errno = ENOTSUP;
118 		return -ENOTSUP;
119 	}
120 	sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
121 	return 0;
122 #else
123 	RTE_SET_USED(sh);
124 	DRV_LOG(ERR, "Allocating pacing index is not supported.");
125 	rte_errno = ENOTSUP;
126 	return -ENOTSUP;
127 #endif
128 }
129 
130 static void
131 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
132 {
133 	mlx5_devx_sq_destroy(&wq->sq_obj);
134 	mlx5_devx_cq_destroy(&wq->cq_obj);
135 	memset(wq, 0, sizeof(*wq));
136 }
137 
138 static void
139 mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
140 {
141 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
142 
143 	mlx5_txpp_destroy_send_queue(wq);
144 }
145 
146 static void
147 mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
148 {
149 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
150 
151 	mlx5_txpp_destroy_send_queue(wq);
152 	if (sh->txpp.tsa) {
153 		mlx5_free(sh->txpp.tsa);
154 		sh->txpp.tsa = NULL;
155 	}
156 }
157 
158 static void
159 mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
160 {
161 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
162 	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
163 	union {
164 		uint32_t w32[2];
165 		uint64_t w64;
166 	} cs;
167 
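	/*
	 * Compose the 64-bit BlueFlame doorbell value from the control
	 * segment of the WQE at index 'ci': ctrl[0] with the WQE index
	 * folded in, followed by ctrl[1] (SQ number and DS count) as is.
	 */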
168 	wq->sq_ci = ci + 1;
169 	cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
170 			(wqe[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
171 	cs.w32[1] = wqe[ci & (wq->sq_size - 1)].ctrl[1];
172 	/* Update SQ doorbell record with new SQ ci. */
173 	mlx5_doorbell_ring(&sh->tx_uar.bf_db, cs.w64, wq->sq_ci,
174 			   wq->sq_obj.db_rec, !sh->tx_uar.dbnc);
175 }
176 
177 static void
178 mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
179 {
180 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
181 	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
182 	uint32_t i;
183 
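	/*
	 * The Rearm Queue is filled with pairs of WQEs: a SEND_EN that
	 * enables execution of the next portion of Clock Queue WQEs and
	 * a WAIT that blocks until the Clock Queue CQ reaches the related
	 * completion index, keeping both queues synchronized.
	 */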
184 	for (i = 0; i < wq->sq_size; i += 2) {
185 		struct mlx5_wqe_cseg *cs;
186 		struct mlx5_wqe_qseg *qs;
187 		uint32_t index;
188 
189 		/* Build SEND_EN request with slave WQE index. */
190 		cs = &wqe[i + 0].cseg;
191 		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
192 		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
193 		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
194 				     MLX5_COMP_MODE_OFFSET);
195 		cs->misc = RTE_BE32(0);
196 		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
197 		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
198 			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
199 		qs->max_index = rte_cpu_to_be_32(index);
200 		qs->qpn_cqn =
201 			   rte_cpu_to_be_32(sh->txpp.clock_queue.sq_obj.sq->id);
202 		/* Build WAIT request with slave CQE index. */
203 		cs = &wqe[i + 1].cseg;
204 		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
205 		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
206 		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
207 				     MLX5_COMP_MODE_OFFSET);
208 		cs->misc = RTE_BE32(0);
209 		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
210 		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
211 			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
212 		qs->max_index = rte_cpu_to_be_32(index);
213 		qs->qpn_cqn =
214 			   rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);
215 	}
216 }
217 
218 /* Creates the Rearm Queue to fire the requests to the Clock Queue in real time. */
219 static int
220 mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
221 {
222 	struct mlx5_devx_create_sq_attr sq_attr = {
223 		.cd_master = 1,
224 		.state = MLX5_SQC_STATE_RST,
225 		.tis_lst_sz = 1,
226 		.tis_num = sh->tis[0]->id,
227 		.wq_attr = (struct mlx5_devx_wq_attr){
228 			.pd = sh->cdev->pdn,
229 			.uar_page =
230 				mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
231 		},
232 		.ts_format = mlx5_ts_format_conv
233 				       (sh->cdev->config.hca_attr.sq_ts_format),
234 	};
235 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
236 	struct mlx5_devx_cq_attr cq_attr = {
237 		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
238 	};
239 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
240 	int ret;
241 
242 	/* Create completion queue object for Rearm Queue. */
243 	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
244 				  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
245 				  sh->numa_node);
246 	if (ret) {
247 		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
248 		return ret;
249 	}
250 	wq->cq_ci = 0;
251 	wq->arm_sn = 0;
252 	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
253 	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
254 	/* Create send queue object for Rearm Queue. */
255 	sq_attr.cqn = wq->cq_obj.cq->id;
256 	/* There should be no WQE leftovers in the cyclic queue. */
257 	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
258 				  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
259 				  sh->numa_node);
260 	if (ret) {
261 		rte_errno = errno;
262 		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
263 		goto error;
264 	}
265 	/* Build the WQEs in the Send Queue before going to the Ready state. */
266 	mlx5_txpp_fill_wqe_rearm_queue(sh);
267 	/* Change queue state to ready. */
268 	msq_attr.sq_state = MLX5_SQC_STATE_RST;
269 	msq_attr.state = MLX5_SQC_STATE_RDY;
270 	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
271 	if (ret) {
272 		DRV_LOG(ERR, "Failed to set Rearm Queue SQ to ready state.");
273 		goto error;
274 	}
275 	return 0;
276 error:
277 	ret = -rte_errno;
278 	mlx5_txpp_destroy_rearm_queue(sh);
279 	rte_errno = -ret;
280 	return ret;
281 }
282 
283 static void
284 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
285 {
286 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
287 	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
288 	struct mlx5_wqe_cseg *cs = &wqe->cseg;
289 	uint32_t wqe_size, opcode, i;
290 	uint8_t *dst;
291 
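	/*
	 * In regular mode each Clock Queue WQE is a single NOP segment.
	 * In test mode the WQE carries the whole test packet inline, so
	 * its size is derived from the test packet length.
	 */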
292 	/* For test purposes fill the WQ with SEND inline packet. */
293 	if (sh->txpp.test) {
294 		wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
295 				     MLX5_WQE_CSEG_SIZE +
296 				     2 * MLX5_WQE_ESEG_SIZE -
297 				     MLX5_ESEG_MIN_INLINE_SIZE,
298 				     MLX5_WSEG_SIZE);
299 		opcode = MLX5_OPCODE_SEND;
300 	} else {
301 		wqe_size = MLX5_WSEG_SIZE;
302 		opcode = MLX5_OPCODE_NOP;
303 	}
304 	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
305 	cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) |
306 				     (wqe_size / MLX5_WSEG_SIZE));
307 	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
308 	cs->misc = RTE_BE32(0);
309 	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
310 	if (sh->txpp.test) {
311 		struct mlx5_wqe_eseg *es = &wqe->eseg;
312 		struct rte_ether_hdr *eth_hdr;
313 		struct rte_ipv4_hdr *ip_hdr;
314 		struct rte_udp_hdr *udp_hdr;
315 
316 		/* Build the inline test packet pattern. */
317 		MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
318 		MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
319 				(sizeof(struct rte_ether_hdr) +
320 				 sizeof(struct rte_ipv4_hdr)));
321 		es->flags = 0;
322 		es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
323 		es->swp_offs = 0;
324 		es->metadata = 0;
325 		es->swp_flags = 0;
326 		es->mss = 0;
327 		es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
328 		/* Build test packet L2 header (Ethernet). */
329 		dst = (uint8_t *)&es->inline_data;
330 		eth_hdr = (struct rte_ether_hdr *)dst;
331 		rte_eth_random_addr(&eth_hdr->dst_addr.addr_bytes[0]);
332 		rte_eth_random_addr(&eth_hdr->src_addr.addr_bytes[0]);
333 		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
334 		/* Build test packet L3 header (IP v4). */
335 		dst += sizeof(struct rte_ether_hdr);
336 		ip_hdr = (struct rte_ipv4_hdr *)dst;
337 		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
338 		ip_hdr->type_of_service = 0;
339 		ip_hdr->fragment_offset = 0;
340 		ip_hdr->time_to_live = 64;
341 		ip_hdr->next_proto_id = IPPROTO_UDP;
342 		ip_hdr->packet_id = 0;
343 		ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
344 						sizeof(struct rte_ether_hdr));
345 		/* Use RFC 5735 / RFC 2544 reserved network test addresses. */
346 		ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
347 					    (0 << 8) | 1);
348 		ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
349 					    (0 << 8) | 2);
350 		if (MLX5_TXPP_TEST_PKT_SIZE <
351 					(sizeof(struct rte_ether_hdr) +
352 					 sizeof(struct rte_ipv4_hdr) +
353 					 sizeof(struct rte_udp_hdr)))
354 			goto wcopy;
355 		/* Build test packet L4 header (UDP). */
356 		dst += sizeof(struct rte_ipv4_hdr);
357 		udp_hdr = (struct rte_udp_hdr *)dst;
358 		udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
359 		udp_hdr->dst_port = RTE_BE16(9);
360 		udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
361 					      sizeof(struct rte_ether_hdr) -
362 					      sizeof(struct rte_ipv4_hdr));
363 		udp_hdr->dgram_cksum = 0;
364 		/* Fill the test packet data. */
365 		dst += sizeof(struct rte_udp_hdr);
366 		for (i = sizeof(struct rte_ether_hdr) +
367 			sizeof(struct rte_ipv4_hdr) +
368 			sizeof(struct rte_udp_hdr);
369 				i < MLX5_TXPP_TEST_PKT_SIZE; i++)
370 			*dst++ = (uint8_t)(i & 0xFF);
371 	}
372 wcopy:
373 	/* Duplicate the pattern to the next WQEs. */
374 	dst = (uint8_t *)(uintptr_t)wq->sq_obj.umem_buf;
375 	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
376 		dst += wqe_size;
377 		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_obj.umem_buf,
378 			   wqe_size);
379 	}
380 }
381 
382 /* Creates the Clock Queue for packet pacing, returns zero on success. */
383 static int
384 mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
385 {
386 	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
387 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
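	/*
	 * The Clock Queue CQ is created with 'use_first_only' and
	 * 'overrun_ignore' set: hardware keeps overwriting the very
	 * first CQE, which is later read atomically to get the timestamp.
	 */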
388 	struct mlx5_devx_cq_attr cq_attr = {
389 		.use_first_only = 1,
390 		.overrun_ignore = 1,
391 		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
392 	};
393 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
394 	int ret;
395 
396 	sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
397 				   MLX5_TXPP_REARM_SQ_SIZE *
398 				   sizeof(struct mlx5_txpp_ts),
399 				   0, sh->numa_node);
400 	if (!sh->txpp.tsa) {
401 		DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
402 		return -ENOMEM;
403 	}
404 	sh->txpp.ts_p = 0;
405 	sh->txpp.ts_n = 0;
406 	/* Create completion queue object for Clock Queue. */
407 	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
408 				  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
409 				  sh->numa_node);
410 	if (ret) {
411 		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
412 		goto error;
413 	}
414 	wq->cq_ci = 0;
415 	/* Allocate memory buffer for Send Queue WQEs. */
416 	if (sh->txpp.test) {
417 		wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
418 					MLX5_WQE_CSEG_SIZE +
419 					2 * MLX5_WQE_ESEG_SIZE -
420 					MLX5_ESEG_MIN_INLINE_SIZE,
421 					MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
422 		wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
423 	} else {
424 		wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
425 	}
426 	/* There should not be WQE leftovers in the cyclic queue. */
427 	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
428 	/* Create send queue object for Clock Queue. */
429 	if (sh->txpp.test) {
430 		sq_attr.tis_lst_sz = 1;
431 		sq_attr.tis_num = sh->tis[0]->id;
432 		sq_attr.non_wire = 0;
433 		sq_attr.static_sq_wq = 1;
434 	} else {
435 		sq_attr.non_wire = 1;
436 		sq_attr.static_sq_wq = 1;
437 	}
438 	sq_attr.cqn = wq->cq_obj.cq->id;
439 	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
440 	sq_attr.wq_attr.cd_slave = 1;
441 	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj);
442 	sq_attr.wq_attr.pd = sh->cdev->pdn;
443 	sq_attr.ts_format =
444 		mlx5_ts_format_conv(sh->cdev->config.hca_attr.sq_ts_format);
445 	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
446 				  log2above(wq->sq_size),
447 				  &sq_attr, sh->numa_node);
448 	if (ret) {
449 		rte_errno = errno;
450 		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
451 		goto error;
452 	}
453 	/* Build the WQEs in the Send Queue before going to the Ready state. */
454 	mlx5_txpp_fill_wqe_clock_queue(sh);
455 	/* Change queue state to ready. */
456 	msq_attr.sq_state = MLX5_SQC_STATE_RST;
457 	msq_attr.state = MLX5_SQC_STATE_RDY;
458 	wq->sq_ci = 0;
459 	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
460 	if (ret) {
461 		DRV_LOG(ERR, "Failed to set Clock Queue SQ to ready state.");
462 		goto error;
463 	}
464 	return 0;
465 error:
466 	ret = -rte_errno;
467 	mlx5_txpp_destroy_clock_queue(sh);
468 	rte_errno = -ret;
469 	return ret;
470 }
471 
472 /* Enable notification from the Rearm Queue CQ. */
473 static inline void
474 mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
475 {
476 	struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
477 	uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
478 	uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
479 	uint64_t db_be =
480 		rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);
481 
482 	mlx5_doorbell_ring(&sh->tx_uar.cq_db, db_be, db_hi,
483 			   &aq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
484 	aq->arm_sn++;
485 }
486 
487 #if defined(RTE_ARCH_X86_64)
488 static inline int
489 mlx5_atomic128_compare_exchange(rte_int128_t *dst,
490 				rte_int128_t *exp,
491 				const rte_int128_t *src)
492 {
493 	uint8_t res;
494 
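	/*
	 * CMPXCHG16B compares the 16 bytes at *dst with RDX:RAX (*exp);
	 * on match it stores RCX:RBX (*src) into *dst, otherwise it loads
	 * *dst into RDX:RAX. SETE captures the comparison result.
	 */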
495 	asm volatile (MPLOCKED
496 		      "cmpxchg16b %[dst];"
497 		      " sete %[res]"
498 		      : [dst] "=m" (dst->val[0]),
499 			"=a" (exp->val[0]),
500 			"=d" (exp->val[1]),
501 			[res] "=r" (res)
502 		      : "b" (src->val[0]),
503 			"c" (src->val[1]),
504 			"a" (exp->val[0]),
505 			"d" (exp->val[1]),
506 			"m" (dst->val[0])
507 		      : "memory");
508 
509 	return res;
510 }
511 #endif
512 
513 static inline void
514 mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
515 {
516 	/*
517 	 * The only CQE of Clock Queue is being continuously
518 	 * updated by hardware with specified rate. We must
519 	 * read timestamp and WQE completion index atomically.
520 	 */
521 #if defined(RTE_ARCH_X86_64)
522 	rte_int128_t src;
523 
524 	memset(&src, 0, sizeof(src));
525 	*ts = src;
526 	/* if (*from == *ts) *from = src; else *ts = *from; */
527 	mlx5_atomic128_compare_exchange(from, ts, &src);
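	/*
	 * With the expected and new values both zero the compare-exchange
	 * virtually never succeeds and simply loads the 16B CQE into *ts
	 * atomically, which is the intended effect.
	 */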
528 #else
529 	uint64_t *cqe = (uint64_t *)from;
530 
531 	/*
532 	 * Power architecture does not support 16B compare-and-swap.
533 	 * ARM implements it in software, code below is more relevant.
534 	 */
535 	for (;;) {
536 		uint64_t tm, op;
537 		uint64_t *ps;
538 
539 		rte_compiler_barrier();
540 		tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
541 		op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
542 		rte_compiler_barrier();
543 		if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
544 			continue;
545 		if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
546 			continue;
547 		ps = (uint64_t *)ts;
548 		ps[0] = tm;
549 		ps[1] = op;
550 		return;
551 	}
552 #endif
553 }
554 
555 /* Stores timestamp in the cache structure to share data with datapath. */
556 static inline void
557 mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
558 			   uint64_t ts, uint64_t ci)
559 {
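	/*
	 * Pack the CQ index into the upper MLX5_CQ_INDEX_WIDTH bits and
	 * the truncated timestamp into the lower bits of 'ci', so readers
	 * can detect torn reads by cross-checking both fields
	 * (see mlx5_txpp_read_tsa()).
	 */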
560 	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
561 	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
562 	rte_compiler_barrier();
563 	__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
564 	__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
565 	rte_wmb();
566 }
567 
568 /* Reads timestamp from Clock Queue CQE and stores in the cache. */
569 static inline void
570 mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
571 {
572 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
573 	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
574 	union {
575 		rte_int128_t u128;
576 		struct mlx5_cqe_ts cts;
577 	} to;
578 	uint64_t ts;
579 	uint16_t ci;
580 	uint8_t opcode;
581 
582 	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
583 	opcode = MLX5_CQE_OPCODE(to.cts.op_own);
584 	if (opcode) {
585 		if (opcode != MLX5_CQE_INVALID) {
586 			/*
587 			 * Commit the error state if and only if
588 			 * we have got at least one actual completion.
589 			 */
590 			DRV_LOG(DEBUG,
591 				"Clock Queue error sync lost (%X).", opcode);
592 			__atomic_fetch_add(&sh->txpp.err_clock_queue,
593 					   1, __ATOMIC_RELAXED);
594 			sh->txpp.sync_lost = 1;
595 		}
596 		return;
597 	}
598 	ci = rte_be_to_cpu_16(to.cts.wqe_counter);
599 	ts = rte_be_to_cpu_64(to.cts.timestamp);
600 	ts = mlx5_txpp_convert_rx_ts(sh, ts);
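	/*
	 * Accumulate the 16-bit WQE counter delta into the 32-bit CQ
	 * index, handling the 16-bit counter wrap-around.
	 */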
601 	wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
602 	wq->sq_ci = ci;
603 	mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
604 }
605 
606 /* Waits for the first completion on Clock Queue to init timestamp. */
607 static inline void
608 mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
609 {
610 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
611 	uint32_t wait;
612 
613 	sh->txpp.ts_p = 0;
614 	sh->txpp.ts_n = 0;
615 	for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
616 		mlx5_txpp_update_timestamp(sh);
617 		if (wq->sq_ci)
618 			return;
619 		/* Wait one millisecond and try again. */
620 		rte_delay_us_sleep(US_PER_S / MS_PER_S);
621 	}
622 	DRV_LOG(ERR, "Unable to initialize timestamp.");
623 	sh->txpp.sync_lost = 1;
624 }
625 
626 #ifdef HAVE_IBV_DEVX_EVENT
627 /* Gather statistics for timestamp from Clock Queue CQE. */
628 static inline void
629 mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
630 {
631 	/* Check whether we have a valid timestamp. */
632 	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
633 		return;
634 	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
635 	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
636 			 sh->txpp.ts.ts, __ATOMIC_RELAXED);
637 	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
638 			 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
639 	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
640 		sh->txpp.ts_p = 0;
641 	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
642 		++sh->txpp.ts_n;
643 }
644 
645 /* Handles Rearm Queue completions in periodic service. */
646 static __rte_always_inline void
647 mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
648 {
649 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
650 	uint32_t cq_ci = wq->cq_ci;
651 	bool error = false;
652 	int ret;
653 
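	/*
	 * Each Rearm Queue CQE covers one SEND_EN + WAIT WQE pair, hence
	 * the SQ consumer index advances by 2 per owned completion.
	 */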
654 	do {
655 		volatile struct mlx5_cqe *cqe;
656 
657 		cqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
658 		ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
659 		switch (ret) {
660 		case MLX5_CQE_STATUS_ERR:
661 			error = true;
662 			++cq_ci;
663 			break;
664 		case MLX5_CQE_STATUS_SW_OWN:
665 			wq->sq_ci += 2;
666 			++cq_ci;
667 			break;
668 		case MLX5_CQE_STATUS_HW_OWN:
669 			break;
670 		default:
671 			MLX5_ASSERT(false);
672 			break;
673 		}
674 	} while (ret != MLX5_CQE_STATUS_HW_OWN);
675 	if (likely(cq_ci != wq->cq_ci)) {
676 		/* Check whether we have missed interrupts. */
677 		if (cq_ci - wq->cq_ci != 1) {
678 			DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
679 			__atomic_fetch_add(&sh->txpp.err_miss_int,
680 					   1, __ATOMIC_RELAXED);
681 			/* Check sync lost on wqe index. */
682 			if (cq_ci - wq->cq_ci >=
683 				(((1UL << MLX5_WQ_INDEX_WIDTH) /
684 				  MLX5_TXPP_REARM) - 1))
685 				error = true;
686 		}
687 		/* Update doorbell record to notify hardware. */
688 		rte_compiler_barrier();
689 		*wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);
690 		rte_wmb();
691 		wq->cq_ci = cq_ci;
692 		/* Fire new requests to Rearm Queue. */
693 		if (error) {
694 			DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
695 			__atomic_fetch_add(&sh->txpp.err_rearm_queue,
696 					   1, __ATOMIC_RELAXED);
697 			sh->txpp.sync_lost = 1;
698 		}
699 	}
700 }
701 
702 /* Handles Clock Queue completions in periodic service. */
703 static __rte_always_inline void
704 mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
705 {
706 	mlx5_txpp_update_timestamp(sh);
707 	mlx5_txpp_gather_timestamp(sh);
708 }
709 #endif
710 
711 /* Invoked periodically on Rearm Queue completions. */
712 void
713 mlx5_txpp_interrupt_handler(void *cb_arg)
714 {
715 #ifndef HAVE_IBV_DEVX_EVENT
716 	RTE_SET_USED(cb_arg);
717 	return;
718 #else
719 	struct mlx5_dev_ctx_shared *sh = cb_arg;
720 	union {
721 		struct mlx5dv_devx_async_event_hdr event_resp;
722 		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
723 	} out;
724 
725 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
726 	/* Process events in the loop. Only rearm completions are expected. */
727 	while (mlx5_glue->devx_get_event
728 				(sh->txpp.echan,
729 				 &out.event_resp,
730 				 sizeof(out.buf)) >=
731 				 (ssize_t)sizeof(out.event_resp.cookie)) {
732 		mlx5_txpp_handle_rearm_queue(sh);
733 		mlx5_txpp_handle_clock_queue(sh);
734 		mlx5_txpp_cq_arm(sh);
735 		mlx5_txpp_doorbell_rearm_queue
736 					(sh, sh->txpp.rearm_queue.sq_ci - 1);
737 	}
738 #endif /* HAVE_IBV_DEVX_EVENT */
739 }
740 
741 static void
742 mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
743 {
744 	if (!rte_intr_fd_get(sh->txpp.intr_handle))
745 		return;
746 	mlx5_intr_callback_unregister(sh->txpp.intr_handle,
747 				      mlx5_txpp_interrupt_handler, sh);
748 	rte_intr_instance_free(sh->txpp.intr_handle);
749 }
750 
751 /* Attaches the interrupt handler and fires the first request to the Rearm Queue. */
752 static int
753 mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
754 {
755 	uint16_t event_nums[1] = {0};
756 	int ret;
757 	int fd;
758 
759 	sh->txpp.err_miss_int = 0;
760 	sh->txpp.err_rearm_queue = 0;
761 	sh->txpp.err_clock_queue = 0;
762 	sh->txpp.err_ts_past = 0;
763 	sh->txpp.err_ts_future = 0;
764 	/* Attach interrupt handler to process Rearm Queue completions. */
765 	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
766 	ret = mlx5_os_set_nonblock_channel_fd(fd);
767 	if (ret) {
768 		rte_errno = errno;
769 		DRV_LOG(ERR, "Failed to change event channel FD.");
770 		return -rte_errno;
771 	}
772 	sh->txpp.intr_handle =
773 			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
774 	if (sh->txpp.intr_handle == NULL) {
775 		DRV_LOG(ERR, "Failed to allocate intr_handle.");
776 		return -ENOMEM;
777 	}
778 	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
779 	if (rte_intr_fd_set(sh->txpp.intr_handle, fd))
780 		return -rte_errno;
781 
782 	if (rte_intr_type_set(sh->txpp.intr_handle, RTE_INTR_HANDLE_EXT))
783 		return -rte_errno;
784 
785 	if (rte_intr_callback_register(sh->txpp.intr_handle,
786 				       mlx5_txpp_interrupt_handler, sh)) {
787 		rte_intr_fd_set(sh->txpp.intr_handle, 0);
788 		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
789 		return -rte_errno;
790 	}
791 	/* Subscribe CQ event to the event channel controlled by the driver. */
792 	ret = mlx5_os_devx_subscribe_devx_event(sh->txpp.echan,
793 					    sh->txpp.rearm_queue.cq_obj.cq->obj,
794 					     sizeof(event_nums), event_nums, 0);
795 	if (ret) {
796 		rte_errno = errno;
797 		DRV_LOG(ERR, "Failed to subscribe CQE event.");
798 		return -rte_errno;
799 	}
800 	/* Enable interrupts in the CQ. */
801 	mlx5_txpp_cq_arm(sh);
802 	/* Fire the first request on Rearm Queue. */
803 	mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
804 	mlx5_txpp_init_timestamp(sh);
805 	return 0;
806 }
807 
808 /*
809  * The routine initializes the packet pacing infrastructure:
810  * - allocates the PP context
811  * - creates the Clock CQ/SQ
812  * - creates the Rearm CQ/SQ
813  * - attaches the rearm interrupt handler
814  * - starts the Clock Queue
815  *
816  * Returns 0 on success, a negative errno value otherwise.
817  */
818 static int
819 mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
820 {
821 	int tx_pp = priv->config.tx_pp;
822 	int ret;
823 
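	/*
	 * A negative tx_pp devarg selects the test mode; the pacing
	 * granularity (tick, in nanoseconds) is the absolute value of tx_pp.
	 */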
824 	/* Store the requested pacing parameters. */
825 	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
826 	sh->txpp.test = !!(tx_pp < 0);
827 	sh->txpp.skew = priv->config.tx_skew;
828 	sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
829 	ret = mlx5_txpp_create_event_channel(sh);
830 	if (ret)
831 		goto exit;
832 	ret = mlx5_txpp_alloc_pp_index(sh);
833 	if (ret)
834 		goto exit;
835 	ret = mlx5_txpp_create_clock_queue(sh);
836 	if (ret)
837 		goto exit;
838 	ret = mlx5_txpp_create_rearm_queue(sh);
839 	if (ret)
840 		goto exit;
841 	ret = mlx5_txpp_start_service(sh);
842 	if (ret)
843 		goto exit;
844 exit:
845 	if (ret) {
846 		mlx5_txpp_stop_service(sh);
847 		mlx5_txpp_destroy_rearm_queue(sh);
848 		mlx5_txpp_destroy_clock_queue(sh);
849 		mlx5_txpp_free_pp_index(sh);
850 		mlx5_txpp_destroy_event_channel(sh);
851 		sh->txpp.tick = 0;
852 		sh->txpp.test = 0;
853 		sh->txpp.skew = 0;
854 	}
855 	return ret;
856 }
857 
858 /*
859  * The routine destroys the packet pacing infrastructure:
860  * - detaches the rearm interrupt handler
861  * - destroys the Rearm CQ/SQ
862  * - destroys the Clock CQ/SQ
863  * - frees the PP context
864  */
865 static void
866 mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
867 {
868 	mlx5_txpp_stop_service(sh);
869 	mlx5_txpp_destroy_rearm_queue(sh);
870 	mlx5_txpp_destroy_clock_queue(sh);
871 	mlx5_txpp_free_pp_index(sh);
872 	mlx5_txpp_destroy_event_channel(sh);
873 	sh->txpp.tick = 0;
874 	sh->txpp.test = 0;
875 	sh->txpp.skew = 0;
876 }
877 
878 /**
879  * Creates and starts packet pacing infrastructure on specified device.
880  *
881  * @param dev
882  *   Pointer to Ethernet device structure.
883  *
884  * @return
885  *   0 on success, a negative errno value otherwise and rte_errno is set.
886  */
887 int
888 mlx5_txpp_start(struct rte_eth_dev *dev)
889 {
890 	struct mlx5_priv *priv = dev->data->dev_private;
891 	struct mlx5_dev_ctx_shared *sh = priv->sh;
892 	int err = 0;
893 
894 	if (!priv->config.tx_pp) {
895 		/* Packet pacing is not requested for the device. */
896 		MLX5_ASSERT(priv->txpp_en == 0);
897 		return 0;
898 	}
899 	if (priv->txpp_en) {
900 		/* Packet pacing is already enabled for the device. */
901 		MLX5_ASSERT(sh->txpp.refcnt);
902 		return 0;
903 	}
904 	if (priv->config.tx_pp > 0) {
905 		err = rte_mbuf_dynflag_lookup
906 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
907 		/* No flag registered means no service needed. */
908 		if (err < 0)
909 			return 0;
910 		err = 0;
911 	}
912 	claim_zero(pthread_mutex_lock(&sh->txpp.mutex));
913 	if (sh->txpp.refcnt) {
914 		priv->txpp_en = 1;
915 		++sh->txpp.refcnt;
916 	} else {
917 		err = mlx5_txpp_create(sh, priv);
918 		if (!err) {
919 			MLX5_ASSERT(sh->txpp.tick);
920 			priv->txpp_en = 1;
921 			sh->txpp.refcnt = 1;
922 		} else {
923 			rte_errno = -err;
924 		}
925 	}
926 	claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
927 	return err;
928 }
929 
930 /**
931  * Stops and destroys packet pacing infrastructure on specified device.
932  *
933  * @param dev
934  *   Pointer to Ethernet device structure.
935  *
936  * @note
937  *   The routine is a no-op if packet pacing is not enabled on the device.
938  */
939 void
940 mlx5_txpp_stop(struct rte_eth_dev *dev)
941 {
942 	struct mlx5_priv *priv = dev->data->dev_private;
943 	struct mlx5_dev_ctx_shared *sh = priv->sh;
944 
945 	if (!priv->txpp_en) {
946 		/* Packet pacing is already disabled for the device. */
947 		return;
948 	}
949 	priv->txpp_en = 0;
950 	claim_zero(pthread_mutex_lock(&sh->txpp.mutex));
951 	MLX5_ASSERT(sh->txpp.refcnt);
952 	if (!sh->txpp.refcnt || --sh->txpp.refcnt) {
953 		claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
954 		return;
955 	}
956 	/* No references any more, do actual destroy. */
957 	mlx5_txpp_destroy(sh);
958 	claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
959 }
960 
961 /*
962  * Read the current clock counter of an Ethernet device
963  *
964  * This returns the current raw clock value of an Ethernet device. It is
965  * a raw amount of ticks, with no given time reference.
966  * The value returned here is from the same clock as the one
967  * filling the timestamp field of Rx/Tx packets when the hardware
968  * timestamp offload is used. Therefore it can be used to compute
969  * a precise conversion of the device clock to real time.
970  *
971  * @param dev
972  *   Pointer to Ethernet device structure.
973  * @param timestamp
974  *   Pointer to the uint64_t that receives the raw clock value.
975  *
976  * @return
977  *   - 0: Success.
978  *   - -ENOTSUP: The function is not supported in this mode. Requires
979  *     the packet pacing module to be configured and started (tx_pp devarg).
980  */
981 int
982 mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
983 {
984 	struct mlx5_priv *priv = dev->data->dev_private;
985 	struct mlx5_dev_ctx_shared *sh = priv->sh;
986 	int ret;
987 
988 	if (sh->txpp.refcnt) {
989 		struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
990 		struct mlx5_cqe *cqe =
991 				(struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
992 		union {
993 			rte_int128_t u128;
994 			struct mlx5_cqe_ts cts;
995 		} to;
996 		uint64_t ts;
997 
998 		mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
999 		if (to.cts.op_own >> 4) {
1000 			DRV_LOG(DEBUG, "Clock Queue error sync lost.");
1001 			__atomic_fetch_add(&sh->txpp.err_clock_queue,
1002 					   1, __ATOMIC_RELAXED);
1003 			sh->txpp.sync_lost = 1;
1004 			return -EIO;
1005 		}
1006 		ts = rte_be_to_cpu_64(to.cts.timestamp);
1007 		ts = mlx5_txpp_convert_rx_ts(sh, ts);
1008 		*timestamp = ts;
1009 		return 0;
1010 	}
1011 	/* Not supported in isolated mode - kernel does not see the CQEs. */
1012 	if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
1013 		return -ENOTSUP;
1014 	ret = mlx5_read_clock(dev, timestamp);
1015 	return ret;
1016 }
1017 
1018 /**
1019  * DPDK callback to clear device extended statistics.
1020  *
1021  * @param dev
1022  *   Pointer to Ethernet device structure.
1023  *
1024  * @return
1025  *   0 on success and stats is reset, negative errno value otherwise and
1026  *   rte_errno is set.
1027  */
1028 int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
1029 {
1030 	struct mlx5_priv *priv = dev->data->dev_private;
1031 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1032 
1033 	__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
1034 	__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
1035 	__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
1036 	__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
1037 	__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
1038 	return 0;
1039 }
1040 
1041 /**
1042  * Routine to retrieve names of extended device statistics
1043  * for packet send scheduling. It appends the specific stats names
1044  * after the parts filled by preceding modules (eth stats, etc.)
1045  *
1046  * @param dev
1047  *   Pointer to Ethernet device structure.
1048  * @param[out] xstats_names
1049  *   Buffer to insert names into.
1050  * @param n
1051  *   The size of the xstats_names array.
1052  * @param n_used
1053  *   Number of names filled by preceding statistics modules.
1054  *
1055  * @return
1056  *   Number of xstats names.
1057  */
1058 int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1059 			       struct rte_eth_xstat_name *xstats_names,
1060 			       unsigned int n, unsigned int n_used)
1061 {
1062 	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
1063 	unsigned int i;
1064 
1065 	if (n >= n_used + n_txpp && xstats_names) {
1066 		for (i = 0; i < n_txpp; ++i) {
1067 			strncpy(xstats_names[i + n_used].name,
1068 				mlx5_txpp_stat_names[i],
1069 				RTE_ETH_XSTATS_NAME_SIZE);
1070 			xstats_names[i + n_used].name
1071 					[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
1072 		}
1073 	}
1074 	return n_used + n_txpp;
1075 }
1076 
1077 static inline void
1078 mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
1079 		   struct mlx5_txpp_ts *tsa, uint16_t idx)
1080 {
1081 	do {
1082 		uint64_t ts, ci;
1083 
1084 		ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
1085 		ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
1086 		rte_compiler_barrier();
1087 		if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
1088 			continue;
1089 		if (__atomic_load_n(&txpp->tsa[idx].ts,
1090 				    __ATOMIC_RELAXED) != ts)
1091 			continue;
1092 		if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
1093 				    __ATOMIC_RELAXED) != ci)
1094 			continue;
1095 		tsa->ts = ts;
1096 		tsa->ci_ts = ci;
1097 		return;
1098 	} while (true);
1099 }
1100 
1101 /*
1102  * Jitter reflects the clock change between
1103  * neighbouring Clock Queue completions.
1104  */
1105 static uint64_t
1106 mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
1107 {
1108 	struct mlx5_txpp_ts tsa0, tsa1;
1109 	int64_t dts, dci;
1110 	uint16_t ts_p;
1111 
1112 	if (txpp->ts_n < 2) {
1113 		/* Not enough reports gathered yet. */
1114 		return 0;
1115 	}
1116 	do {
1117 		int ts_0, ts_1;
1118 
1119 		ts_p = txpp->ts_p;
1120 		rte_compiler_barrier();
1121 		ts_0 = ts_p - 2;
1122 		if (ts_0 < 0)
1123 			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
1124 		ts_1 = ts_p - 1;
1125 		if (ts_1 < 0)
1126 			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
1127 		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
1128 		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
1129 		rte_compiler_barrier();
1130 	} while (ts_p != txpp->ts_p);
1131 	/* We have two neighbor reports, calculate the jitter. */
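	/*
	 * dts is the measured time between the two completions, dci is
	 * the expected time (completion index delta multiplied by the
	 * tick), the jitter is the absolute difference between them.
	 */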
1132 	dts = tsa1.ts - tsa0.ts;
1133 	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
1134 	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
1135 	if (dci < 0)
1136 		dci += 1 << MLX5_CQ_INDEX_WIDTH;
1137 	dci *= txpp->tick;
1138 	return (dts > dci) ? dts - dci : dci - dts;
1139 }
1140 
1141 /*
1142  * Wander reflects the long-term clock change
1143  * over the entire length of all Clock Queue completions.
1144  */
1145 static uint64_t
1146 mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
1147 {
1148 	struct mlx5_txpp_ts tsa0, tsa1;
1149 	int64_t dts, dci;
1150 	uint16_t ts_p;
1151 
1152 	if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
1153 		/* Not enough reports gathered yet. */
1154 		return 0;
1155 	}
1156 	do {
1157 		int ts_0, ts_1;
1158 
1159 		ts_p = txpp->ts_p;
1160 		rte_compiler_barrier();
1161 		ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
1162 		if (ts_0 < 0)
1163 			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
1164 		ts_1 = ts_p - 1;
1165 		if (ts_1 < 0)
1166 			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
1167 		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
1168 		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
1169 		rte_compiler_barrier();
1170 	} while (ts_p != txpp->ts_p);
1171 	/* We have two neighbor reports, calculate the jitter. */
1172 	/* We have two reports half of the ring apart, calculate the wander. */
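	/*
	 * Same calculation as for the jitter, but the two reports are half
	 * of the timestamp ring apart, so the result reflects the long-term
	 * drift of the device clock against the expected pacing rate.
	 */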
1173 	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
1174 	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
1175 	dci += 1 << MLX5_CQ_INDEX_WIDTH;
1176 	dci *= txpp->tick;
1177 	return (dts > dci) ? dts - dci : dci - dts;
1178 }
1179 
1180 /**
1181  * Routine to retrieve extended device statistics
1182  * for packet send scheduling. It appends the specific statistics
1183  * after the parts filled by preceding modules (eth stats, etc.)
1184  *
1185  * @param dev
1186  *   Pointer to Ethernet device.
1187  * @param[out] stats
1188  *   Pointer to rte extended stats table.
1189  * @param n
1190  *   The size of the stats table.
1191  * @param n_used
1192  *   Number of stats filled by preceding statistics modules.
1193  *
1194  * @return
1195  *   Number of extended stats on success and stats is filled,
1196  *   negative on error and rte_errno is set.
1197  */
1198 int
1199 mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
1200 		     struct rte_eth_xstat *stats,
1201 		     unsigned int n, unsigned int n_used)
1202 {
1203 	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
1204 
1205 	if (n >= n_used + n_txpp && stats) {
1206 		struct mlx5_priv *priv = dev->data->dev_private;
1207 		struct mlx5_dev_ctx_shared *sh = priv->sh;
1208 		unsigned int i;
1209 
1210 		for (i = 0; i < n_txpp; ++i)
1211 			stats[n_used + i].id = n_used + i;
1212 		stats[n_used + 0].value =
1213 				__atomic_load_n(&sh->txpp.err_miss_int,
1214 						__ATOMIC_RELAXED);
1215 		stats[n_used + 1].value =
1216 				__atomic_load_n(&sh->txpp.err_rearm_queue,
1217 						__ATOMIC_RELAXED);
1218 		stats[n_used + 2].value =
1219 				__atomic_load_n(&sh->txpp.err_clock_queue,
1220 						__ATOMIC_RELAXED);
1221 		stats[n_used + 3].value =
1222 				__atomic_load_n(&sh->txpp.err_ts_past,
1223 						__ATOMIC_RELAXED);
1224 		stats[n_used + 4].value =
1225 				__atomic_load_n(&sh->txpp.err_ts_future,
1226 						__ATOMIC_RELAXED);
1227 		stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
1228 		stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
1229 		stats[n_used + 7].value = sh->txpp.sync_lost;
1230 	}
1231 	return n_used + n_txpp;
1232 }
1233