xref: /dpdk/drivers/net/intel/ice/ice_diagnose.c (revision c038157a2e4416338bb5c7171ae7d611c454045d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2022 Intel Corporation
3  */
4 
5 #include <stdlib.h>
6 #include <unistd.h>
7 #include <sys/stat.h>
8 
9 #include <rte_string_fns.h>
10 #include <rte_malloc.h>
11 #include <rte_tailq.h>
12 
13 #include "ice_ethdev.h"
14 #include "ice_rxtx.h"
15 
16 #define ICE_BLK_MAX_COUNT          512
17 #define ICE_BUFF_SEG_HEADER_FLAG   0x1
18 #define ICE_PKG_HDR_HEADR_PART1    1
19 #define ICE_PKG_HDR_HEADR_PART2    2
20 #define ICE_PKG_HDR_GM_SEG_OFFSET  16
21 #define ICE_PKG_HDR_ICE_SEG_OFFSET 100
22 #define ICE_PKG_GM_SEG_TYPE        1
23 #define ICE_PKG_MAJOR_VERSION      1
24 #define ICE_PKG_GM_SEG_SIZE        84
25 #define ICE_PKG_ICE_SEG_TYPE       0x10
26 #define ICE_PKG_ICE_SEG_SIZE_BASE  56
27 #define SPACE_CHAR                 0x20
28 
/* Copy a NUL-terminated identifier string into a fixed-size,
 * space-padded package name field (ICE_PKG_NAME_SIZE bytes).
 *
 * Fix: the previous version called strlen(_dst) right after filling
 * _dst with spaces and no terminator, which reads past the end of the
 * field (undefined behavior). Bound the copy by the destination size
 * instead; strlcpy NUL-terminates and truncates as needed, and the
 * remainder of the field keeps its space padding.
 */
#define ICE_PKG_COPY_STRING(dst, src)	\
	do {\
		char *_dst = (dst); \
		const char *_src = (src); \
		memset(_dst, SPACE_CHAR, ICE_PKG_NAME_SIZE); \
		strlcpy(_dst, _src, ICE_PKG_NAME_SIZE); \
	} while (0)
36 
37 /* Package header */
38 struct ice_package_header {
39 	struct __hdr {
40 		uint32_t h1; /* header part 1 */
41 		uint32_t h2; /* header part 2 */
42 	} header;
43 	uint32_t gm_seg_offset;	 /* Global Metadata segment: 16 */
44 	uint32_t ice_seg_offset; /* ICE segment: 100 */
45 	struct ice_global_metadata_seg gm_seg;
46 	struct __ice_seg {
47 		struct ice_generic_seg_hdr hdr;
48 		uint32_t devid_count;
49 		struct ice_pkg_ver nvm_ver;
50 	} ice_seg;
51 
52 	uint32_t buff_count;
53 };
54 
/* Common header written at the start of every buffer section;
 * all fields are little-endian on-wire values.
 */
struct ice_buff_seg_header {
	__le16 flag;		/* ICE_BUFF_SEG_HEADER_FLAG */
	__le16 length;		/* total section length in bytes */
	__le16 type;		/* section type identifier */
	__le16 reserve;		/* 0 */
	__le16 header_len;	/* 0x0C */
	__le16 data_size;	/* length - header_len */
};
63 
/* Section layout written by write_buffer_simple(): header plus a
 * single end-of-segment marker.
 */
struct ice_buff_seg_simple {
	struct ice_buff_seg_header header;
	__le16 seg_end;
};
68 
/* One row of the "simple segment" template table consumed by
 * write_buffer_simple(): where to write (addr) and what header
 * values to emit there.
 */
struct ice_buff_seg_simple_data {
	__le16 type;	/* section type id */
	__le32 addr;	/* byte offset of the section in the package buffer */
	__le16 len;	/* section length */
	__le16 seg_end;	/* end marker value */
};
75 
/* Section layout written by write_buffer_block(): header, an offset
 * delta, and a two-slot offset array (which slot holds the running
 * offset is chosen by the template's padding field).
 */
struct ice_buff_seg_series {
	struct ice_buff_seg_header header;
	uint16_t offset_delta;
	uint16_t offset[2];
};
81 
/* One row of the "series segment" template consumed by
 * write_buffer_block(): a run of sections from begin_addr to end_addr
 * in ICE_PKG_BUF_SIZE steps, with the final section using last_len
 * and seg_end instead of len/offset_delta.
 */
struct ice_buff_seg_series_data {
	__le16 type;		/* section type id */
	__le32 begin_addr;	/* first section offset */
	__le16 len;		/* length of every section except the last */
	__le32 end_addr;	/* last section offset */
	__le16 last_len;	/* length of the last section */
	__le16 offset_delta;	/* per-section offset increment */
	__le16 seg_end;		/* delta value used for the last section */
	uint8_t padding;	/* selects which offset[] slot is used (0 or 1) */
};
92 
/* Section layout written by write_buffer_block2(): header plus the
 * count of fixed-size sub-blocks that follow it.
 */
struct ice_buff_seg_series_with_sub {
	struct ice_buff_seg_header header;
	uint16_t sub_block_num;
};
97 
/* One row of the "series with sub-blocks" template consumed by
 * write_buffer_block2(); sblk_size is the stride of each indexed
 * sub-block within the section payload.
 */
struct ice_buff_seg_series_with_sub_data {
	__le16 type;		/* section type id */
	__le32 begin_addr;	/* first section offset */
	__le16 len;		/* length of every section except the last */
	__le32 end_addr;	/* last section offset */
	__le16 last_len;	/* length of the last section */
	__le16 sblk_size;	/* size of one sub-block in bytes */
};
106 
107 
/* Cached size of the common section header (written into header_len). */
static const
uint16_t ice_buff_seg_header_size = sizeof(struct ice_buff_seg_header);
110 
111 static void
112 write_buffer_simple(uint8_t **buff)
113 {
114 	uint16_t i;
115 	/* ICE ddp package simple segment template */
116 	const struct ice_buff_seg_simple_data buff_data[] = {
117 	    {0x0001, 0x00000, 0x0030, 0x0000},
118 	    {0x000a, 0x01000, 0x0810, 0x0800},
119 	    {0x000b, 0x02000, 0x00d8, 0x0000},
120 	    {0x000d, 0x06000, 0x0810, 0x0400},
121 	    {0x000f, 0x09000, 0x0110, 0x0100},
122 	    {0x0011, 0x17000, 0x001d, 0x0000},
123 	    {0x0012, 0x18000, 0x0014, 0x0000},
124 	    {0x0014, 0x19000, 0x0810, 0x0800},
125 	    {0x0015, 0x1a000, 0x00d8, 0x0000},
126 	    {0x0017, 0x1e000, 0x0810, 0x0400},
127 	    {0x0019, 0x21000, 0x0090, 0x0080},
128 	    {0x001b, 0x27000, 0x001d, 0x0000},
129 	    {0x001c, 0x28000, 0x0014, 0x0000},
130 	    {0x001e, 0x29000, 0x0810, 0x0800},
131 	    {0x001f, 0x2a000, 0x00d8, 0x0000},
132 	    {0x0021, 0x2e000, 0x0810, 0x0400},
133 	    {0x0023, 0x31000, 0x0090, 0x0080},
134 	    {0x0025, 0x36000, 0x001d, 0x0000},
135 	    {0x0026, 0x37000, 0x0014, 0x0000},
136 	    {0x0028, 0x38000, 0x0810, 0x0800},
137 	    {0x0029, 0x39000, 0x00d8, 0x0000},
138 	    {0x002b, 0x3d000, 0x0810, 0x0400},
139 	    {0x002d, 0x40000, 0x0090, 0x0080},
140 	    {0x002f, 0x45000, 0x001d, 0x0000},
141 	    {0x0030, 0x46000, 0x0014, 0x0000},
142 	    {0x0035, 0x57000, 0x0010, 0x0000},
143 	    {0x003a, 0x67000, 0x0190, 0x0010},
144 	    {0x003b, 0x68000, 0x0810, 0x0800},
145 	    {0x003f, 0x79000, 0x0010, 0x0000},
146 	    {0x0044, 0x89000, 0x0190, 0x0010},
147 	    {0x0045, 0x8a000, 0x0810, 0x0800},
148 	    {0x0046, 0x8b000, 0x001c, 0x0000},
149 	    {0x0047, 0x8c000, 0x001c, 0x0000},
150 	    {0x0048, 0x8d000, 0x0410, 0x0080},
151 	    {0x0049, 0x8e000, 0x0410, 0x0080},
152 	    {0x004a, 0x8f000, 0x0028, 0x0006},
153 	    {0x004b, 0x90000, 0x0028, 0x0006},
154 	    {0x004c, 0x91000, 0x0890, 0x0080},
155 	    {0x004d, 0x92000, 0x0890, 0x0080},
156 	    {0x004e, 0x93000, 0x0350, 0x0040},
157 	    {0x004f, 0x94000, 0x0350, 0x0040},
158 	    {0x0050, 0x95000, 0x0810, 0x0800},
159 	    {0x0051, 0x96000, 0x00d8, 0x0000},
160 	    {0x0053, 0x9a000, 0x0810, 0x0400},
161 	    {0x0055, 0x9c000, 0x0030, 0x0020},
162 	    {0x0057, 0x9f000, 0x001d, 0x0000},
163 	    {0x0058, 0xa0000, 0x0014, 0x0000},
164 	    {0x005a, 0xa1000, 0x0024, 0x0000},
165 	    {0x005b, 0xa2000, 0x0024, 0x0000},
166 	    {0x005d, 0xa4000, 0x0810, 0x0100},
167 	    {0x020d, 0xa8000, 0x0414, 0x0400},
168 	    {0x020e, 0xa9000, 0x0214, 0x0200},
169 	    {0x020f, 0xaa000, 0x0114, 0x0100},
170 	    {0x0210, 0xab000, 0x0114, 0x0100},
171 	    {0x0217, 0xaf000, 0x0414, 0x0400},
172 	    {0x0218, 0xb0000, 0x0214, 0x0200},
173 	    {0x0219, 0xb1000, 0x0094, 0x0080},
174 	    {0x021a, 0xb2000, 0x0094, 0x0080},
175 	    {0x0221, 0xb6000, 0x0414, 0x0400},
176 	    {0x0222, 0xb7000, 0x0214, 0x0200},
177 	    {0x0223, 0xb8000, 0x0094, 0x0080},
178 	    {0x0224, 0xb9000, 0x0094, 0x0080},
179 	    {0x022b, 0xbd000, 0x0414, 0x0400},
180 	    {0x022c, 0xbe000, 0x0214, 0x0200},
181 	    {0x022d, 0xbf000, 0x0094, 0x0080},
182 	    {0x022e, 0xc0000, 0x0094, 0x0080},
183 	    {0x0238, 0xc1000, 0x0114, 0x0100},
184 	    {0x0253, 0xc5000, 0x0414, 0x0400},
185 	    {0x0254, 0xc6000, 0x0054, 0x0040},
186 	    {0x0255, 0xc7000, 0x0034, 0x0020},
187 	    {0x0256, 0xc8000, 0x0034, 0x0020},
188 	};
189 
190 	for (i = 0; i < ARRAY_SIZE(buff_data); i++) {
191 		const struct ice_buff_seg_simple_data *seg = &buff_data[i];
192 		struct ice_buff_seg_simple buff_seg;
193 		uint8_t *buffer = &(*buff)[seg->addr];
194 
195 		memset(buffer, 0xFF, ICE_PKG_BUF_SIZE);
196 		buff_seg.header.flag = ICE_BUFF_SEG_HEADER_FLAG;
197 		buff_seg.header.length = seg->len;
198 		buff_seg.header.type = seg->type;
199 		buff_seg.header.reserve = 0x0;
200 		buff_seg.header.header_len =
201 			sizeof(struct ice_buff_seg_header);
202 		buff_seg.header.data_size =
203 			buff_seg.header.length - buff_seg.header.header_len;
204 		buff_seg.seg_end = seg->seg_end;
205 
206 		memset(buffer, 0x00, buff_seg.header.length);
207 		memcpy(buffer, &buff_seg, sizeof(struct ice_buff_seg_simple));
208 	}
209 }
210 
/* Stamp the "series segment" template sections into the package image.
 *
 * Each template row describes a run of sections from begin_addr to
 * end_addr (step ICE_PKG_BUF_SIZE). Every section gets a header; the
 * last section in the run uses last_len/seg_end instead of
 * len/offset_delta. The row's padding field (0 or 1) selects which
 * offset[] slot carries the running offset accumulated across the run.
 *
 * @buff: pointer to the package image buffer (written in place)
 */
static void
write_buffer_block(uint8_t **buff)
{
	uint16_t i;
	/* ICE ddp package multiple segments template 1 */
	const struct ice_buff_seg_series_data buff_data[] = {
		{0x000c, 0x03000, 0x1000, 0x05000, 0x0030, 0x0ff0, 0x0020, 0},
		{0x0010, 0x0a000, 0x0fd0, 0x16000, 0x0310, 0x0015, 0x0004, 0},
		{0x0016, 0x1b000, 0x1000, 0x1d000, 0x0030, 0x0ff0, 0x0020, 0},
		{0x001a, 0x22000, 0x0f90, 0x26000, 0x0210, 0x001f, 0x0004, 0},
		{0x0020, 0x2b000, 0x1000, 0x2d000, 0x0030, 0x0ff0, 0x0020, 0},
		{0x0024, 0x32000, 0x0fd0, 0x35000, 0x00d0, 0x002a, 0x0002, 0},
		{0x002a, 0x3a000, 0x1000, 0x3c000, 0x0030, 0x0ff0, 0x0020, 0},
		{0x002e, 0x41000, 0x0fd0, 0x44000, 0x00d0, 0x002a, 0x0002, 0},
		{0x0032, 0x47000, 0x1000, 0x4f000, 0x0090, 0x00ff, 0x0008, 0},
		{0x0033, 0x50000, 0x1000, 0x53000, 0x0040, 0x0154, 0x0004, 0},
		{0x0034, 0x54000, 0x1000, 0x56000, 0x0430, 0x0055, 0x0016, 0},
		{0x0039, 0x65000, 0x1000, 0x66000, 0x0220, 0x00aa, 0x0016, 0},
		{0x003c, 0x69000, 0x1000, 0x71000, 0x0090, 0x00ff, 0x0008, 0},
		{0x003d, 0x72000, 0x1000, 0x75000, 0x0040, 0x0154, 0x0004, 0},
		{0x003e, 0x76000, 0x1000, 0x78000, 0x0430, 0x0055, 0x0016, 0},
		{0x0043, 0x87000, 0x1000, 0x88000, 0x0220, 0x00aa, 0x0016, 0},
		{0x0052, 0x97000, 0x1000, 0x99000, 0x0030, 0x0ff0, 0x0020, 0},
		{0x0056, 0x9d000, 0x0f90, 0x9e000, 0x0090, 0x001f, 0x0001, 0},
		{0x020c, 0xa5000, 0x1000, 0xa7000, 0x003c, 0x0fec, 0x0028, 1},
		{0x0216, 0xac000, 0x1000, 0xae000, 0x003c, 0x0fec, 0x0028, 1},
		{0x0220, 0xb3000, 0x1000, 0xb5000, 0x003c, 0x0fec, 0x0028, 1},
		{0x022a, 0xba000, 0x1000, 0xbc000, 0x003c, 0x0fec, 0x0028, 1},
		{0x0252, 0xc2000, 0x1000, 0xc4000, 0x003c, 0x0fec, 0x0028, 1},
	};

	for (i = 0; i < ARRAY_SIZE(buff_data); i++) {
		const struct ice_buff_seg_series_data *seg = &buff_data[i];
		struct ice_buff_seg_series buff_seg;
		const uint16_t buff_seg_size =
			sizeof(struct ice_buff_seg_series);
		uint32_t addr = seg->begin_addr;
		__le16 last_offset = 0;

		for (; addr <= seg->end_addr; addr += ICE_PKG_BUF_SIZE) {
			uint8_t *buffer = &(*buff)[addr];

			memset(buffer, 0xFF, ICE_PKG_BUF_SIZE);
			buff_seg.header.flag = ICE_BUFF_SEG_HEADER_FLAG;
			/* the final section of the run is shorter */
			buff_seg.header.length = addr == seg->end_addr ?
						seg->last_len : seg->len;
			buff_seg.header.type = seg->type;
			buff_seg.header.reserve = 0x0;
			buff_seg.header.header_len = ice_buff_seg_header_size;
			buff_seg.header.data_size = buff_seg.header.length -
						buff_seg.header.header_len;
			/* last section carries seg_end instead of the delta */
			buff_seg.offset_delta =  addr < seg->end_addr ?
				seg->offset_delta : seg->seg_end;
			/* padding picks which slot holds the running offset */
			buff_seg.offset[!seg->padding] = 0x0;
			buff_seg.offset[seg->padding] = last_offset;

			memset(buffer, 0x00, buff_seg.header.length);
			memcpy(buffer, &buff_seg, buff_seg_size);

			/* offset accumulates across the run's sections */
			last_offset += seg->offset_delta;
		}
	}
}
274 
275 static void
276 write_buffer_block2(uint8_t **buff)
277 {
278 	uint16_t i;
279 	/* ICE ddp package multiple segments template 2 */
280 	struct ice_buff_seg_series_with_sub_data buff_data[] = {
281 		{0x000e, 0x07000, 0x1000, 0x08000, 0x0a1c, 13},
282 		{0x0018, 0x1f000, 0x1000, 0x20000, 0x0a1c, 13},
283 		{0x0022, 0x2f000, 0x1000, 0x30000, 0x0a1c, 13},
284 		{0x002c, 0x3e000, 0x1000, 0x3f000, 0x0a1c, 13},
285 		{0x0037, 0x58000, 0x1000, 0x5e000, 0x0070, 24},
286 		{0x0038, 0x5f000, 0x0fe0, 0x64000, 0x0900, 88},
287 		{0x0041, 0x7a000, 0x1000, 0x80000, 0x0070, 24},
288 		{0x0042, 0x81000, 0x0fe0, 0x86000, 0x0900, 88},
289 		{0x0054, 0x9b000, 0x034e, 0x9b000, 0x034e, 13},
290 		{0x005c, 0xa3000, 0x0a10, 0xa3000, 0x0a10, 40},
291 	};
292 
293 	for (i = 0; i < ARRAY_SIZE(buff_data); i++) {
294 		struct ice_buff_seg_series_with_sub_data *seg = &buff_data[i];
295 		struct ice_buff_seg_series_with_sub buff_seg;
296 		const uint16_t buff_seg_size =
297 			sizeof(struct ice_buff_seg_series_with_sub);
298 		uint32_t addr;
299 		uint16_t last_idx = 0;
300 
301 		for (addr = seg->begin_addr;
302 		     addr <= seg->end_addr; addr += ICE_PKG_BUF_SIZE) {
303 			uint8_t *buffer = &(*buff)[addr];
304 			uint16_t total_sblk_size;
305 			uint16_t idx = 0;
306 			uint32_t pos = buff_seg_size;
307 
308 			memset(buffer, 0xFF, ICE_PKG_BUF_SIZE);
309 			buff_seg.header.flag = ICE_BUFF_SEG_HEADER_FLAG;
310 			buff_seg.header.length =
311 				addr == seg->end_addr ?
312 					seg->last_len : seg->len;
313 			buff_seg.header.type = seg->type;
314 			buff_seg.header.reserve = 0x0;
315 			buff_seg.header.header_len = ice_buff_seg_header_size;
316 			buff_seg.header.data_size = buff_seg.header.length -
317 					buff_seg.header.header_len;
318 
319 			total_sblk_size = buff_seg.header.data_size
320 					  - sizeof(buff_seg.sub_block_num);
321 			buff_seg.sub_block_num =
322 					total_sblk_size / seg->sblk_size;
323 
324 			memset(buffer, 0x00, buff_seg.header.length);
325 			memcpy(buffer, &buff_seg, buff_seg_size);
326 
327 			/* padding if needed */
328 			if (total_sblk_size % seg->sblk_size)
329 				pos += sizeof(uint16_t);
330 
331 			for (idx = last_idx;
332 			     idx < last_idx + buff_seg.sub_block_num; idx++) {
333 				memcpy(buffer + pos, &idx, sizeof(uint16_t));
334 				pos += seg->sblk_size;
335 			}
336 
337 			last_idx = idx;
338 		}
339 	}
340 }
341 
342 static int
343 ice_dump_pkg(struct rte_eth_dev *dev, uint8_t **buff, uint32_t *size)
344 {
345 	struct ice_hw *hw;
346 	struct ice_buf pkg_buff;
347 	uint8_t *next_buff;
348 	uint16_t i = 0;
349 	uint16_t count;
350 	struct ice_package_header *cache;
351 	uint32_t cache_size;
352 
353 	write_buffer_simple(buff);
354 	write_buffer_block(buff);
355 	write_buffer_block2(buff);
356 
357 	hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
358 
359 	if (*size % ICE_PKG_BUF_SIZE)
360 		return -EINVAL;
361 
362 	count = *size / ICE_PKG_BUF_SIZE;
363 	for (i = 0; i < count; i++) {
364 		next_buff = (uint8_t *)(*buff) + i * ICE_PKG_BUF_SIZE;
365 		rte_memcpy(pkg_buff.buf, next_buff, ICE_PKG_BUF_SIZE);
366 		if (ice_aq_upload_section(hw,
367 					  (struct ice_buf_hdr *)&pkg_buff.buf[0],
368 					  ICE_PKG_BUF_SIZE,
369 					  NULL))
370 			return -EINVAL;
371 		rte_memcpy(next_buff, pkg_buff.buf, ICE_PKG_BUF_SIZE);
372 	}
373 
374 	cache_size = sizeof(struct ice_package_header) + *size;
375 	cache = (struct ice_package_header *)malloc(cache_size);
376 	if (!cache)
377 		return -ENOSPC;
378 
379 	cache->header.h1 = ICE_PKG_HDR_HEADR_PART1;
380 	cache->header.h2 = ICE_PKG_HDR_HEADR_PART2;
381 	cache->gm_seg_offset = ICE_PKG_HDR_GM_SEG_OFFSET;
382 	cache->ice_seg_offset = ICE_PKG_HDR_ICE_SEG_OFFSET;
383 	cache->gm_seg.hdr.seg_type = ICE_PKG_GM_SEG_TYPE;
384 	cache->gm_seg.hdr.seg_format_ver.major = ICE_PKG_MAJOR_VERSION;
385 	cache->gm_seg.hdr.seg_size = ICE_PKG_GM_SEG_SIZE;
386 	ICE_PKG_COPY_STRING(cache->gm_seg.hdr.seg_id, "Global Metadata");
387 
388 	cache->gm_seg.pkg_ver.major = ICE_PKG_MAJOR_VERSION;
389 	cache->gm_seg.rsvd = 1;
390 	ICE_PKG_COPY_STRING(cache->gm_seg.pkg_name, "DEFAULT");
391 
392 	cache->ice_seg.hdr.seg_type = ICE_PKG_ICE_SEG_TYPE;
393 	cache->ice_seg.hdr.seg_format_ver.major = ICE_PKG_MAJOR_VERSION;
394 	cache->ice_seg.hdr.seg_size = ICE_PKG_ICE_SEG_SIZE_BASE + *size;
395 	cache->ice_seg.devid_count = 0;
396 	cache->ice_seg.nvm_ver.major = 0;
397 	ICE_PKG_COPY_STRING(cache->ice_seg.hdr.seg_id, "CPK Configuration Data");
398 
399 	cache->buff_count = count;
400 
401 	next_buff = (uint8_t *)cache;
402 	next_buff += sizeof(struct ice_package_header);
403 	memcpy(next_buff, *buff, *size);
404 
405 	free(*buff);
406 	*buff = (uint8_t *)cache;
407 	*size = cache_size;
408 
409 	return 0;
410 }
411 
412 int rte_pmd_ice_dump_package(uint16_t port, uint8_t **buff, uint32_t *size)
413 {
414 	struct rte_eth_dev *dev;
415 
416 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
417 
418 	dev = &rte_eth_devices[port];
419 	if (!is_ice_supported(dev))
420 		return -ENOTSUP;
421 
422 	return ice_dump_pkg(dev, buff, size);
423 }
424 
/* Render inbuf as uppercase hex into *outbuf.
 *
 * @outbuf:     destination buffer pointer; exactly inbuf_size * 2
 *              bytes are written (no NUL terminator)
 * @inbuf:      raw bytes to convert
 * @inbuf_size: number of input bytes
 *
 * Returns the number of output bytes written (inbuf_size * 2,
 * truncated to uint16_t — callers pass at most one 4 KB buffer).
 *
 * Fix: the previous sprintf()-per-byte version wrote a NUL terminator
 * one byte past the reported output length on every call, spilling
 * into the caller's buffer; the nibble lookup writes exactly
 * 2 * inbuf_size bytes.
 */
static uint16_t
covert_byte_to_hex(uint8_t **outbuf, const uint8_t *inbuf, uint32_t inbuf_size)
{
	static const char hex_digits[] = "0123456789ABCDEF";
	uint32_t i;
	uint8_t *buffer = *outbuf;

	for (i = 0; i < inbuf_size; ++i) {
		buffer[i * 2] = hex_digits[inbuf[i] >> 4];
		buffer[i * 2 + 1] = hex_digits[inbuf[i] & 0xF];
	}

	return inbuf_size * 2;
}
435 
/* Dump the switch tables of an E810 device as hex text into *buff2.
 *
 * Iterates the firmware's internal-data dump interface: each AQ call
 * returns one chunk plus the next (tbl_id, tbl_idx) cursor. A line of
 * the form "<tbl_id>:" starts each table; a newline ends it.
 *
 * @dev:   ethdev whose hardware is queried
 * @buff2: caller-provided output buffer; assumed large enough for the
 *         full hex dump -- TODO confirm sizing contract with callers
 * @size:  out: number of bytes written
 *
 * Returns 0 on success, ICE_ERR_NO_MEMORY or the AQ error otherwise.
 */
static int
ice_dump_switch(struct rte_eth_dev *dev, uint8_t **buff2, uint32_t *size)
{
	struct ice_hw *hw;
	struct ice_sq_cd *cd = NULL;
	int i = 0;
	uint16_t tbl_id = 0;
	uint16_t tbl_idx = 0;
	uint8_t *buffer = *buff2;

	hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* table index string format: "0000:" */
	#define TBL_IDX_STR_SIZE 7
	/* bounded loop: at most ICE_BLK_MAX_COUNT AQ round-trips */
	for (i = 0; i < ICE_BLK_MAX_COUNT; i++) {
		int res;
		uint16_t buff_size;
		uint8_t *buff;
		uint32_t offset = 0;

		/* scratch buffer for one AQ response chunk */
		buff = malloc(ICE_PKG_BUF_SIZE);
		if (!buff)
			return ICE_ERR_NO_MEMORY;

		/* tbl_idx == 0 means a new table starts: emit "<id>:" */
		if (tbl_idx == 0) {
			char tbl_idx_str[TBL_IDX_STR_SIZE];
			memset(tbl_idx_str, 0, sizeof(tbl_idx_str));
			sprintf(tbl_idx_str, "%d:", tbl_id);
			memcpy(buffer, tbl_idx_str, strlen(tbl_idx_str));
			offset = strlen(tbl_idx_str);
			buffer += offset;
		}

		/* firmware advances tbl_id/tbl_idx to the next chunk */
		res = ice_aq_get_internal_data(hw,
			ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E810,
			tbl_id, tbl_idx, buff,
			ICE_PKG_BUF_SIZE,
			&buff_size, &tbl_id, &tbl_idx, NULL, cd);

		if (res) {
			free(buff);
			return res;
		}

		offset = covert_byte_to_hex(&buffer, buff, buff_size);
		buffer += offset;

		free(buff);

		/* 0xffff marks the end of the current table */
		if (tbl_idx == 0xffff) {
			tbl_idx = 0;
			memset(buffer, '\n', sizeof(char));
			buffer++;
			offset = 0;
		}

		/* 0xff appears to mark the last table -- confirm with
		 * the debug-dump AQ command spec
		 */
		if (tbl_id == 0xff)
			break;
	}

	*size = buffer - *buff2;
	return 0;
}
499 
500 int rte_pmd_ice_dump_switch(uint16_t port, uint8_t **buff, uint32_t *size)
501 {
502 	struct rte_eth_dev *dev;
503 
504 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
505 
506 	dev = &rte_eth_devices[port];
507 	if (!is_ice_supported(dev))
508 		return -ENOTSUP;
509 
510 	return ice_dump_switch(dev, buff, size);
511 }
512 
513 static void print_rl_profile(const struct ice_aqc_rl_profile_elem *prof,
514 			     FILE *stream)
515 {
516 	fprintf(stream, "\t\t\t\t\t<td>\n");
517 	fprintf(stream, "\t\t\t\t\t\t<table>\n");
518 
519 	fprintf(stream, "\t\t\t\t\t\t\t<tr>\n");
520 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>id</td>\n");
521 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>%d</td>\n", prof->profile_id);
522 	fprintf(stream, "\t\t\t\t\t\t\t</tr>\n");
523 
524 	fprintf(stream, "\t\t\t\t\t\t\t<tr>\n");
525 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>max burst size</td>\n");
526 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>%d</td>\n", prof->max_burst_size);
527 	fprintf(stream, "\t\t\t\t\t\t\t</tr>\n");
528 
529 	fprintf(stream, "\t\t\t\t\t\t\t<tr>\n");
530 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>rate limit multiply</td>\n");
531 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>%d</td>\n", prof->rl_multiply);
532 	fprintf(stream, "\t\t\t\t\t\t\t</tr>\n");
533 
534 	fprintf(stream, "\t\t\t\t\t\t\t<tr>\n");
535 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>wake up calculation</td>\n");
536 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>%d</td>\n", prof->wake_up_calc);
537 	fprintf(stream, "\t\t\t\t\t\t\t</tr>\n");
538 
539 	fprintf(stream, "\t\t\t\t\t\t\t<tr>\n");
540 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>rate limit encode</td>\n");
541 	fprintf(stream, "\t\t\t\t\t\t\t\t<td>%d</td>\n", prof->rl_encode);
542 	fprintf(stream, "\t\t\t\t\t\t\t</tr>\n");
543 
544 	fprintf(stream, "\t\t\t\t\t\t</table>\n");
545 	fprintf(stream, "\t\t\t\t\t</td>\n");
546 }
547 
548 static const char *
549 get_elem_type(u8 type)
550 {
551 	static const char * const ice_sched_node_types[] = {
552 			"Undefined", "Root", "TC", "SE Generic", "SW Entry", "Leaf"
553 	};
554 	if (type < RTE_DIM(ice_sched_node_types))
555 		return ice_sched_node_types[type];
556 	return "*UNKNOWN*";
557 }
558 
/* Print the names of the valid-section bits set in vs (bit 0 =
 * generic, 1 = cir, 2 = eir, 3 = shared), each followed by a space.
 */
static
void print_valid_sections(FILE *stream, u8 vs)
{
	if ((vs & 0x1) != 0)
		fprintf(stream, "generic ");
	if ((vs & 0x2) != 0)
		fprintf(stream, "cir ");
	if ((vs & 0x4) != 0)
		fprintf(stream, "eir ");
	if ((vs & 0x8) != 0)
		fprintf(stream, "shared ");
}
571 
/* Print the scheduling mode: "pps" (packets/sec) when the flag is
 * set, "bps" (bytes/sec) otherwise.
 */
static
void print_scheduling_mode(FILE *stream, bool flag)
{
	fprintf(stream, "%s", flag ? "pps" : "bps");
}
580 
/* Print the priority mode: strict single-priority when the flag is
 * set, weighted fair queuing otherwise.
 */
static
void print_priority_mode(FILE *stream, bool flag)
{
	fprintf(stream, "%s", flag ? "single priority node" : "wfq");
}
589 
/* Emit one scheduler node as a Graphviz node with an HTML-like table
 * label.
 *
 * Always prints teid and element type (plus the owning TXQ for leaf
 * nodes). When detail is true, also prints the element's sections,
 * scheduling/priority modes, and the cir/eir/shared rate-limit
 * profiles ("default" when the corresponding profile pointer is NULL).
 */
static
void print_node(const struct rte_eth_dev_data *ethdata,
		const struct ice_aqc_txsched_elem_data *data,
		const struct ice_aqc_rl_profile_elem *cir_prof,
		const struct ice_aqc_rl_profile_elem *eir_prof,
		const struct ice_aqc_rl_profile_elem *shared_prof,
		bool detail, FILE *stream)
{
	fprintf(stream, "\tNODE_%d [\n", data->node_teid);
	fprintf(stream, "\t\tlabel=<\n");

	fprintf(stream, "\t\t\t<table>\n");

	fprintf(stream, "\t\t\t\t<tr><td>teid</td><td>%d</td></tr>\n", data->node_teid);
	fprintf(stream, "\t\t\t\t<tr><td>type</td><td>%s</td></tr>\n",
			get_elem_type(data->data.elem_type));
	/* for leaf nodes, find the Tx queue whose teid matches */
	if (data->data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		for (uint16_t i = 0; i < ethdata->nb_tx_queues; i++) {
			struct ci_tx_queue *q = ethdata->tx_queues[i];
			if (q->q_teid == data->node_teid) {
				fprintf(stream, "\t\t\t\t<tr><td>TXQ</td><td>%u</td></tr>\n", i);
				break;
			}
		}
	}

	if (!detail)
		goto brief;

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> valid sections </td>\n");
	fprintf(stream, "\t\t\t\t\t<td>");
	print_valid_sections(stream, data->data.valid_sections);
	fprintf(stream,	"</td>\n");
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> scheduling mode </td>\n");
	fprintf(stream, "\t\t\t\t\t<td>");
	/* generic bit 0: pps vs bps */
	print_scheduling_mode(stream, (data->data.generic & 0x1) != 0);
	fprintf(stream, "</td>\n");
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> priority </td>\n");
	/* generic bits 1-3: priority value */
	fprintf(stream, "\t\t\t\t\t<td> %d </td>\n", (data->data.generic >> 1) & 0x7);
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> priority mode</td>\n");
	fprintf(stream, "\t\t\t\t\t<td>");
	/* generic bit 4: single-priority vs wfq */
	print_priority_mode(stream, ((data->data.generic >> 4) & 0x1) != 0);
	fprintf(stream,	"</td>\n");
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> adjustment value </td>\n");
	/* generic bits 5-6: adjustment value */
	fprintf(stream, "\t\t\t\t\t<td> %d </td>\n", (data->data.generic >> 5) & 0x3);
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> suspended </td>\n");
	fprintf(stream, "\t\t\t\t\t<td> %d </td>\n", data->data.flags & 0x1);
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> cir bw profile </td>\n");
	if (cir_prof == NULL)
		fprintf(stream, "\t\t\t\t\t<td> default </td>\n");
	else
		print_rl_profile(cir_prof, stream);
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> cir bw weight </td>\n");
	fprintf(stream, "\t\t\t\t\t<td> %d </td>\n", data->data.cir_bw.bw_alloc);
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> eir bw profile </td>\n");
	if (eir_prof == NULL)
		fprintf(stream, "\t\t\t\t\t<td> default </td>\n");
	else
		print_rl_profile(eir_prof, stream);
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> eir bw weight </td>\n");
	fprintf(stream, "\t\t\t\t\t<td> %d </td>\n", data->data.eir_bw.bw_alloc);
	fprintf(stream, "\t\t\t\t</tr>\n");

	fprintf(stream, "\t\t\t\t<tr>\n");
	fprintf(stream, "\t\t\t\t\t<td> shared rl profile </td>\n");
	if (shared_prof == NULL)
		fprintf(stream, "\t\t\t\t\t<td> default </td>\n");
	else
		print_rl_profile(shared_prof, stream);
	fprintf(stream, "\t\t\t\t</tr>\n");

brief:
	fprintf(stream, "\t\t\t</table>\n");

	fprintf(stream, "\t\t>\n");
	fprintf(stream, "\t\tshape=plain\n");
	fprintf(stream, "\t]\n");

}
697 
698 static
699 int query_rl_profile(struct ice_hw *hw,
700 		     uint8_t level, uint8_t flags, uint16_t profile_id,
701 		     struct ice_aqc_rl_profile_elem *data)
702 {
703 	int ice_status;
704 
705 	data->level = level;
706 	data->flags = flags;
707 	data->profile_id = profile_id;
708 
709 	ice_status = ice_aq_query_rl_profile(hw, 1, data,
710 					     sizeof(struct ice_aqc_rl_profile_elem), NULL);
711 
712 	if (ice_status != ICE_SUCCESS) {
713 		PMD_DRV_LOG(ERR, "Failed to query rl profile.");
714 		return -EINVAL;
715 	}
716 
717 	return 0;
718 }
719 
/* Query the rate-limit profiles referenced by one scheduler node and
 * print it.
 *
 * A zero profile index / srl id means "default profile"; only nonzero
 * ones are fetched from firmware (flags 0 = cir, 1 = eir, 2 = shared,
 * matching the query_rl_profile() call sites). Unqueried profiles are
 * passed to print_node() as NULL so it prints "default".
 *
 * Returns 0 on success or the query_rl_profile() error.
 */
static int
query_node(struct ice_hw *hw, struct rte_eth_dev_data *ethdata,
		struct ice_sched_node *node, bool detail, FILE *stream)
{
	struct ice_aqc_txsched_elem_data *data = &node->info;
	struct ice_aqc_rl_profile_elem cir_prof;
	struct ice_aqc_rl_profile_elem eir_prof;
	struct ice_aqc_rl_profile_elem shared_prof;
	struct ice_aqc_rl_profile_elem *cp = NULL;
	struct ice_aqc_rl_profile_elem *ep = NULL;
	struct ice_aqc_rl_profile_elem *sp = NULL;
	u8 level = node->tx_sched_layer;
	int ret;

	if (data->data.cir_bw.bw_profile_idx != 0) {
		ret = query_rl_profile(hw, level, 0, data->data.cir_bw.bw_profile_idx, &cir_prof);

		if (ret)
			return ret;
		cp = &cir_prof;
	}

	if (data->data.eir_bw.bw_profile_idx != 0) {
		ret = query_rl_profile(hw, level, 1, data->data.eir_bw.bw_profile_idx, &eir_prof);

		if (ret)
			return ret;
		ep = &eir_prof;
	}

	if (data->data.srl_id != 0) {
		ret = query_rl_profile(hw, level, 2, data->data.srl_id, &shared_prof);

		if (ret)
			return ret;
		sp = &shared_prof;
	}

	print_node(ethdata, data, cp, ep, sp, detail, stream);

	return 0;
}
762 
/* Depth-first dump of the Tx scheduler tree rooted at node.
 *
 * Opens a Graphviz "subgraph cluster" when crossing into a different
 * VSI than the parent's, prints the node itself, recurses into its
 * children, then emits the parent->child edge (skipped for the root,
 * whose parent_teid is all-ones).
 *
 * Returns 0 on success or the first error from query_node().
 */
static int
query_node_recursive(struct ice_hw *hw, struct rte_eth_dev_data *ethdata,
		struct ice_sched_node *node, bool detail, FILE *stream)
{
	bool close = false;
	if (node->parent != NULL && node->vsi_handle != node->parent->vsi_handle) {
		fprintf(stream, "subgraph cluster_%u {\n", node->vsi_handle);
		fprintf(stream, "\tlabel = \"VSI %u\";\n", node->vsi_handle);
		close = true;
	}

	int ret = query_node(hw, ethdata, node, detail, stream);
	if (ret != 0)
		return ret;

	for (uint16_t i = 0; i < node->num_children; i++) {
		ret = query_node_recursive(hw, ethdata, node->children[i], detail, stream);
		if (ret != 0)
			return ret;
		/* if we have a lot of nodes, skip a bunch in the middle */
		if (node->num_children > 16 && i == 2) {
			/* after the first 3 children, jump ahead so only
			 * the last 2 are printed; a placeholder node
			 * stands in for the skipped ones
			 */
			uint16_t inc = node->num_children - 5;
			fprintf(stream, "\tn%d_children [label=\"... +%d child nodes ...\"];\n",
					node->info.node_teid, inc);
			fprintf(stream, "\tNODE_%d -> n%d_children;\n",
					node->info.node_teid, node->info.node_teid);
			i += inc;
		}
	}
	if (close)
		fprintf(stream, "}\n");
	/* 0xFFFFFFFF parent teid marks the root: no incoming edge */
	if (node->info.parent_teid != 0xFFFFFFFF)
		fprintf(stream, "\tNODE_%d -> NODE_%d\n",
				node->info.parent_teid, node->info.node_teid);

	return 0;
}
800 
801 int
802 rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream)
803 {
804 	struct rte_eth_dev *dev;
805 	struct ice_hw *hw;
806 
807 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
808 
809 	dev = &rte_eth_devices[port];
810 	if (!is_ice_supported(dev))
811 		return -ENOTSUP;
812 
813 	dev = &rte_eth_devices[port];
814 	hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
815 
816 	fprintf(stream, "digraph tx_sched {\n");
817 	query_node_recursive(hw, dev->data, hw->port_info->root, detail, stream);
818 	fprintf(stream, "}\n");
819 
820 	return 0;
821 }
822