/* $NetBSD: ixgbe_dcb_82599.c,v 1.12 2023/07/15 21:41:26 andvar Exp $ */
/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_dcb_82599.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe_dcb_82599.c,v 1.12 2023/07/15 21:41:26 andvar Exp $");

#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
				 struct ixgbe_hw_stats *stats,
				 u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_tc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes (read low first to prevent missed carry) */
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
		stats->qbtc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
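		/*
		 * Note: the byte counters are wider than 32 bits and are
		 * split across _L/_H register pairs.  Reading the low half
		 * first means a carry into the high half between the two
		 * reads is still observed by the high read; the reverse
		 * order could pair a stale high half with a wrapped low
		 * half.
		 */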
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes (read low first to prevent missed carry) */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
		stats->qbrc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

		/* Received Dropped Packets */
		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
				  struct ixgbe_hw_stats *stats,
				  u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_pfc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
				      u16 *max, u8 *bwg_id, u8 *tsa,
				      u8 *map)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8  i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/*
	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to that
	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
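	/*
	 * Worked example (hypothetical map, not from this file): with
	 * map = {0, 0, 1, 1, 2, 2, 3, 3} and a 3-bit field per priority,
	 * the register ends up holding 011 011 010 010 001 001 000 000
	 * in binary, priority 7 in the topmost used field down to
	 * priority 0 in the lowest.
	 */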

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg, max_credits;
	u8  i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
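	/*
	 * Note: RTTDT1C is accessed indirectly; each iteration latches the
	 * target queue number into RTTDQSEL, and the following RTTDT1C
	 * write then applies to that queue.
	 */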
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= (u32)(refill[i]);
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTDT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa,
					   u8 *map)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/*
	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to that
	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTPT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 supports per TC Rx priority flow control.  So
	 * clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	if (hw->mac.type >= ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
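			/*
			 * Note: hw->fc.high_water/low_water are kept in KB
			 * units; the << 10 above scales them to the byte
			 * values the FCRTH/FCRTL registers expect.
			 */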
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure the queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
				    struct ixgbe_dcb_config *dcb_config)
{
	u32 reg = 0;
	u8  i   = 0;
	u8 tc_count = 8;
	bool vt_mode = FALSE;

	if (dcb_config != NULL) {
		tc_count = dcb_config->num_tcs.pg_tcs;
		vt_mode = dcb_config->vt_mode;
	}

	if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
		return IXGBE_ERR_PARAM;

	if (tc_count == 8 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
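		/*
		 * Each RQSMR register holds four 8-bit per-queue stat-index
		 * fields; multiplying the TC number by 0x01010101 replicates
		 * it into all four fields at once.
		 */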
		for (i = 0; i < 32; i++) {
			reg = 0x01010101 * (i / 4);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 32, 32, 16, 16, 8, 8, 8, 8.
		 */
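		/*
		 * Concretely: TQSM registers 0-7 cover queues 0-31 (TC0),
		 * 8-15 cover queues 32-63 (TC1), 16-19 cover 64-79 (TC2),
		 * 20-23 cover 80-95 (TC3), and each remaining pair of
		 * registers covers one 8-queue TC (TC4-TC7).
		 */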
		for (i = 0; i < 32; i++) {
			if (i < 8)
				reg = 0x00000000;
			else if (i < 16)
				reg = 0x01010101;
			else if (i < 20)
				reg = 0x02020202;
			else if (i < 24)
				reg = 0x03030303;
			else if (i < 26)
				reg = 0x04040404;
			else if (i < 28)
				reg = 0x05050505;
			else if (i < 30)
				reg = 0x06060606;
			else
				reg = 0x07070707;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			/* In 4 TC mode, odd 16-queue ranges are not used. */
			if (i % 8 > 3)
				continue;
			reg = 0x01010101 * (i / 8);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 64, 32, 16, 16.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 16)
				reg = 0x00000000;
			else if (i < 24)
				reg = 0x01010101;
			else if (i < 28)
				reg = 0x02020202;
			else
				reg = 0x03030303;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == TRUE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each
		 * pool.  Set all 32 queues of each TC across pools to the
		 * same stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each
		 * pool.  Set all 32 queues of each TC across pools to the
		 * same stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
			   struct ixgbe_dcb_config *dcb_config)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	if (dcb_config->num_tcs.pg_tcs == 8) {
		/* Enable DCB for Rx with 8 TCs */
		switch (reg & IXGBE_MRQC_MRQE_MASK) {
		case 0:
		case IXGBE_MRQC_RT4TCEN:
			/* RSS disabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
			break;
		case IXGBE_MRQC_RSSEN:
		case IXGBE_MRQC_RTRSS4TCEN:
			/* RSS enabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS8TCEN;
			break;
		default:
			/*
			 * Unsupported value, assume stale data,
			 * overwrite with no RSS
			 */
			ASSERT(0);
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
		}
	}
	if (dcb_config->num_tcs.pg_tcs == 4) {
		/* We support both VT-on and VT-off with 4 TCs. */
		if (dcb_config->vt_mode)
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_VMDQRT4TCEN;
		else
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS4TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	if (dcb_config->num_tcs.pg_tcs == 8)
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	else {
		/* We support both VT-on and VT-off with 4 TCs. */
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		if (dcb_config->vt_mode)
			reg |= IXGBE_MTQC_VT_ENA;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
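	/*
	 * Note: QDE is written indirectly; each write selects one queue via
	 * the index field, and leaving the enable bit clear turns
	 * drop-on-empty off for that queue.
	 */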

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure DCB settings and enable DCB mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
			      u8 *map)
{
	UNREFERENCED_1PARAMETER(link_speed);

	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
					  map);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
					       tsa);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
					       tsa, map);

	return IXGBE_SUCCESS;
}
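
/*
 * Usage sketch (hypothetical, not part of this file): a caller that has
 * already derived per-TC credit arrays from its DCB configuration would
 * typically do something like
 *
 *	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS], max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
 *	u8 bwg_id[IXGBE_DCB_MAX_TRAFFIC_CLASS], tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
 *	u8 map[IXGBE_DCB_MAX_USER_PRIORITY];
 *	...
 *	ixgbe_dcb_hw_config_82599(hw, link_speed, refill, max, bwg_id, tsa, map);
 *
 * where the arrays are indexed as documented above and the values come
 * from the caller's DCB configuration.
 */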