/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2024 Realtek Corporation. All rights reserved
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#include <rte_ether.h>
#include <ethdev_driver.h>

#include "r8169_hw.h"
#include "r8169_logs.h"
#include "r8169_dash.h"

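/*
 * ERI register access helpers.
 *
 * ERI registers are reached indirectly through the ERIAR/ERIDR register
 * pair: a command word (operation, access type, byte enables, address and
 * optional OOB base address) is posted to ERIAR, the ERIAR_Flag bit is
 * polled until the hardware completes, and the data is transferred through
 * ERIDR. Accesses that are not 4-byte aligned are handled by masking the
 * relevant byte lanes and stepping through the range one dword at a time.
 */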
static u32
rtl_eri_read_with_oob_base_address(struct rtl_hw *hw, int addr, int len,
				   int type, const u32 base_address)
{
	int i, val_shift, shift = 0;
	u32 value1 = 0;
	u32 value2 = 0;
	u32 eri_cmd, tmp, mask;
	const u32 transformed_base_address = ((base_address & 0x00FFF000) << 6) |
					     (base_address & 0x000FFF);

	if (len > 4 || len <= 0)
		return -1;

	while (len > 0) {
		val_shift = addr % ERIAR_Addr_Align;
		addr = addr & ~0x3;

		eri_cmd = ERIAR_Read | transformed_base_address |
			  type << ERIAR_Type_shift |
			  ERIAR_ByteEn << ERIAR_ByteEn_shift |
			  (addr & 0x0FFF);
		if (addr & 0xF000) {
			tmp = addr & 0xF000;
			tmp >>= 12;
			eri_cmd |= (tmp << 20) & 0x00F00000;
		}

		RTL_W32(hw, ERIAR, eri_cmd);

		for (i = 0; i < RTL_CHANNEL_WAIT_COUNT; i++) {
			rte_delay_us(RTL_CHANNEL_WAIT_TIME);

			/* Check if the NIC has completed ERI read */
			if (RTL_R32(hw, ERIAR) & ERIAR_Flag)
				break;
		}

		if (len == 1)
			mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF;
		else if (len == 2)
			mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF;
		else if (len == 3)
			mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF;
		else
			mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF;

		value1 = RTL_R32(hw, ERIDR) & mask;
		value2 |= (value1 >> val_shift * 8) << shift * 8;

		if (len <= 4 - val_shift) {
			len = 0;
		} else {
			len -= (4 - val_shift);
			shift = 4 - val_shift;
			addr += 4;
		}
	}

	rte_delay_us(RTL_CHANNEL_EXIT_DELAY_TIME);

	return value2;
}

static int
rtl_eri_write_with_oob_base_address(struct rtl_hw *hw, int addr,
				    int len, u32 value, int type, const u32 base_address)
{
	int i, val_shift, shift = 0;
	u32 value1 = 0;
	u32 eri_cmd, mask, tmp;
	const u32 transformed_base_address = ((base_address & 0x00FFF000) << 6) |
					     (base_address & 0x000FFF);

	if (len > 4 || len <= 0)
		return -1;

	while (len > 0) {
		val_shift = addr % ERIAR_Addr_Align;
		addr = addr & ~0x3;

		if (len == 1)
			mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF;
		else if (len == 2)
			mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF;
		else if (len == 3)
			mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF;
		else
			mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF;

		value1 = rtl_eri_read_with_oob_base_address(hw, addr, 4, type,
							    base_address) & ~mask;
		value1 |= ((value << val_shift * 8) >> shift * 8);

		RTL_W32(hw, ERIDR, value1);

		eri_cmd = ERIAR_Write | transformed_base_address |
			  type << ERIAR_Type_shift |
			  ERIAR_ByteEn << ERIAR_ByteEn_shift |
			  (addr & 0x0FFF);
		if (addr & 0xF000) {
			tmp = addr & 0xF000;
			tmp >>= 12;
			eri_cmd |= (tmp << 20) & 0x00F00000;
		}

		RTL_W32(hw, ERIAR, eri_cmd);

		for (i = 0; i < RTL_CHANNEL_WAIT_COUNT; i++) {
			rte_delay_us(RTL_CHANNEL_WAIT_TIME);

			/* Check if the NIC has completed ERI write */
			if (!(RTL_R32(hw, ERIAR) & ERIAR_Flag))
				break;
		}

		if (len <= 4 - val_shift) {
			len = 0;
		} else {
			len -= (4 - val_shift);
			shift = 4 - val_shift;
			addr += 4;
		}
	}

	rte_delay_us(RTL_CHANNEL_EXIT_DELAY_TIME);

	return 0;
}

static u32
rtl_ocp_read_with_oob_base_address(struct rtl_hw *hw, u16 addr, u8 len,
				   const u32 base_address)
{
	return rtl_eri_read_with_oob_base_address(hw, addr, len, ERIAR_OOB,
						  base_address);
}

u32
rtl_ocp_read(struct rtl_hw *hw, u16 addr, u8 len)
{
	u32 value = 0;

	if (!hw->AllowAccessDashOcp)
		return 0xffffffff;

	if (hw->HwSuppOcpChannelVer == 2)
		value = rtl_ocp_read_with_oob_base_address(hw, addr, len, NO_BASE_ADDRESS);

	return value;
}

static u32
rtl_ocp_write_with_oob_base_address(struct rtl_hw *hw, u16 addr, u8 len,
				    u32 value, const u32 base_address)
{
	return rtl_eri_write_with_oob_base_address(hw, addr, len, value, ERIAR_OOB,
						   base_address);
}

void
rtl_ocp_write(struct rtl_hw *hw, u16 addr, u8 len, u32 value)
{
	if (!hw->AllowAccessDashOcp)
		return;

	if (hw->HwSuppOcpChannelVer == 2)
		rtl_ocp_write_with_oob_base_address(hw, addr, len, value, NO_BASE_ADDRESS);
}

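/*
 * OOB mutex used to serialize OCP register access between the driver
 * (in-band) and the DASH firmware (out-of-band) on chips that support DASH.
 * The driver claims the mutex by setting its bit in the in-band mutex
 * register and waits for the out-of-band register to clear; if the priority
 * register indicates the firmware side has priority, the driver temporarily
 * drops its claim and retries. Both wait loops are bounded (~2000 polls) so
 * the function cannot spin forever.
 */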
void
rtl8125_oob_mutex_lock(struct rtl_hw *hw)
{
	u8 reg_16, reg_a0;
	u16 ocp_reg_mutex_ib;
	u16 ocp_reg_mutex_oob;
	u16 ocp_reg_mutex_prio;
	u32 wait_cnt_0, wait_cnt_1;

	if (!hw->DASH)
		return;

	switch (hw->mcfg) {
	case CFG_METHOD_48:
	case CFG_METHOD_49:
	case CFG_METHOD_52:
	case CFG_METHOD_54:
	case CFG_METHOD_55:
		ocp_reg_mutex_oob = 0x110;
		ocp_reg_mutex_ib = 0x114;
		ocp_reg_mutex_prio = 0x11C;
		break;
	default:
		return;
	}

	rtl_ocp_write(hw, ocp_reg_mutex_ib, 1, BIT_0);
	reg_16 = rtl_ocp_read(hw, ocp_reg_mutex_oob, 1);
	wait_cnt_0 = 0;
	while (reg_16) {
		reg_a0 = rtl_ocp_read(hw, ocp_reg_mutex_prio, 1);
		if (reg_a0) {
			rtl_ocp_write(hw, ocp_reg_mutex_ib, 1, 0x00);
			reg_a0 = rtl_ocp_read(hw, ocp_reg_mutex_prio, 1);
			wait_cnt_1 = 0;
			while (reg_a0) {
				reg_a0 = rtl_ocp_read(hw, ocp_reg_mutex_prio, 1);

				wait_cnt_1++;

				if (wait_cnt_1 > 2000)
					break;
			}
			rtl_ocp_write(hw, ocp_reg_mutex_ib, 1, BIT_0);
		}
		reg_16 = rtl_ocp_read(hw, ocp_reg_mutex_oob, 1);

		wait_cnt_0++;

		if (wait_cnt_0 > 2000)
			break;
	}
}

void
rtl8125_oob_mutex_unlock(struct rtl_hw *hw)
{
	u16 ocp_reg_mutex_ib;
	u16 ocp_reg_mutex_prio;

	if (!hw->DASH)
		return;

	switch (hw->mcfg) {
	case CFG_METHOD_48:
	case CFG_METHOD_49:
	case CFG_METHOD_52:
	case CFG_METHOD_54:
	case CFG_METHOD_55:
		ocp_reg_mutex_ib = 0x114;
		ocp_reg_mutex_prio = 0x11C;
		break;
	default:
		return;
	}

	rtl_ocp_write(hw, ocp_reg_mutex_prio, 1, BIT_0);
	rtl_ocp_write(hw, ocp_reg_mutex_ib, 1, 0x00);
}

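/*
 * MAC OCP registers are accessed through the single MACOCP register: the
 * word-aligned register index (addr / 2) goes in the upper bits, the 16-bit
 * payload in the lower bits, and OCPR_Write selects a write cycle. A read
 * posts the address alone and then fetches the data from the same register.
 */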
void
rtl_mac_ocp_write(struct rtl_hw *hw, u16 addr, u16 value)
{
	u32 data32;

	data32 = addr / 2;
	data32 <<= OCPR_Addr_Reg_shift;
	data32 += value;
	data32 |= OCPR_Write;

	RTL_W32(hw, MACOCP, data32);
}

u16
rtl_mac_ocp_read(struct rtl_hw *hw, u16 addr)
{
	u32 data32;
	u16 data16 = 0;

	data32 = addr / 2;
	data32 <<= OCPR_Addr_Reg_shift;

	RTL_W32(hw, MACOCP, data32);
	data16 = (u16)RTL_R32(hw, MACOCP);

	return data16;
}

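/*
 * CSI accesses follow the same indirect pattern as ERI/OCP: the address and
 * byte enables are written to CSIAR, CSIAR_Flag is polled for completion,
 * and the data travels through CSIDR.
 */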
u32
rtl_csi_read(struct rtl_hw *hw, u32 addr)
{
	u32 cmd;
	int i;
	u32 value = 0;

	cmd = CSIAR_Read | CSIAR_ByteEn << CSIAR_ByteEn_shift |
	      (addr & CSIAR_Addr_Mask);

	RTL_W32(hw, CSIAR, cmd);

	for (i = 0; i < 10; i++) {
		rte_delay_us(100);

		/* Check if the NIC has completed CSI read */
		if (RTL_R32(hw, CSIAR) & CSIAR_Flag) {
			value = RTL_R32(hw, CSIDR);
			break;
		}
	}

	rte_delay_us(20);

	return value;
}

void
rtl_csi_write(struct rtl_hw *hw, u32 addr, u32 value)
{
	u32 cmd;
	int i;

	RTL_W32(hw, CSIDR, value);
	cmd = CSIAR_Write | CSIAR_ByteEn << CSIAR_ByteEn_shift |
	      (addr & CSIAR_Addr_Mask);

	RTL_W32(hw, CSIAR, cmd);

	for (i = 0; i < RTL_CHANNEL_WAIT_COUNT; i++) {
		rte_delay_us(RTL_CHANNEL_WAIT_TIME);

		/* Check if the NIC has completed CSI write */
		if (!(RTL_R32(hw, CSIAR) & CSIAR_Flag))
			break;
	}

	rte_delay_us(RTL_CHANNEL_EXIT_DELAY_TIME);
}

static void
rtl_enable_rxdvgate(struct rtl_hw *hw)
{
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		RTL_W8(hw, 0xF2, RTL_R8(hw, 0xF2) | BIT_3);
		rte_delay_ms(2);
	}
}

void
rtl_disable_rxdvgate(struct rtl_hw *hw)
{
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		RTL_W8(hw, 0xF2, RTL_R8(hw, 0xF2) & ~BIT_3);
		rte_delay_ms(2);
	}
}

static void
rtl_stop_all_request(struct rtl_hw *hw)
{
	int i;

	RTL_W8(hw, ChipCmd, RTL_R8(hw, ChipCmd) | StopReq);

	switch (hw->mcfg) {
	case CFG_METHOD_48:
	case CFG_METHOD_49:
	case CFG_METHOD_52:
		for (i = 0; i < 20; i++) {
			rte_delay_us(10);
			if (!(RTL_R8(hw, ChipCmd) & StopReq))
				break;
		}

		break;
	default:
		rte_delay_us(200);
		break;
	}

	RTL_W8(hw, ChipCmd, RTL_R8(hw, ChipCmd) & (CmdTxEnb | CmdRxEnb));
}

static void
rtl_wait_txrx_fifo_empty(struct rtl_hw *hw)
{
	int i;

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		for (i = 0; i < 3000; i++) {
			rte_delay_us(50);
			if ((RTL_R8(hw, MCUCmd_reg) & (Txfifo_empty | Rxfifo_empty)) ==
			    (Txfifo_empty | Rxfifo_empty))
				break;
		}
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_50:
	case CFG_METHOD_51:
	case CFG_METHOD_53 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		for (i = 0; i < 3000; i++) {
			rte_delay_us(50);
			if ((RTL_R16(hw, IntrMitigate) & (BIT_0 | BIT_1 | BIT_8)) ==
			    (BIT_0 | BIT_1 | BIT_8))
				break;
		}
		break;
	}
}

static void
rtl_disable_rx_packet_filter(struct rtl_hw *hw)
{
	RTL_W32(hw, RxConfig, RTL_R32(hw, RxConfig) &
		~(AcceptErr | AcceptRunt | AcceptBroadcast | AcceptMulticast |
		AcceptMyPhys | AcceptAllPhys));
}

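/*
 * Software reset sequence: stop accepting packets, gate RXDV, ask the chip
 * to stop outstanding requests, wait for the TX/RX FIFOs to drain, then
 * issue CmdReset and poll until the chip clears it.
 */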
void
rtl_nic_reset(struct rtl_hw *hw)
{
	int i;

	rtl_disable_rx_packet_filter(hw);

	rtl_enable_rxdvgate(hw);

	rtl_stop_all_request(hw);

	rtl_wait_txrx_fifo_empty(hw);

	rte_delay_ms(2);

	/* Soft reset the chip. */
	RTL_W8(hw, ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		rte_delay_us(100);
		if ((RTL_R8(hw, ChipCmd) & CmdReset) == 0)
			break;
	}
}

void
rtl_enable_cfg9346_write(struct rtl_hw *hw)
{
	RTL_W8(hw, Cfg9346, RTL_R8(hw, Cfg9346) | Cfg9346_Unlock);
}

void
rtl_disable_cfg9346_write(struct rtl_hw *hw)
{
	RTL_W8(hw, Cfg9346, RTL_R8(hw, Cfg9346) & ~Cfg9346_Unlock);
}

static void
rtl_enable_force_clkreq(struct rtl_hw *hw, bool enable)
{
	if (enable)
		RTL_W8(hw, 0xF1, RTL_R8(hw, 0xF1) | BIT_7);
	else
		RTL_W8(hw, 0xF1, RTL_R8(hw, 0xF1) & ~BIT_7);
}

static void
rtl_enable_aspm_clkreq_lock(struct rtl_hw *hw, bool enable)
{
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69:
		rtl_enable_cfg9346_write(hw);
		if (enable) {
			RTL_W8(hw, Config2, RTL_R8(hw, Config2) | BIT_7);
			RTL_W8(hw, Config5, RTL_R8(hw, Config5) | BIT_0);
		} else {
			RTL_W8(hw, Config2, RTL_R8(hw, Config2) & ~BIT_7);
			RTL_W8(hw, Config5, RTL_R8(hw, Config5) & ~BIT_0);
		}
		rtl_disable_cfg9346_write(hw);
		break;
	case CFG_METHOD_70:
	case CFG_METHOD_71:
		rtl_enable_cfg9346_write(hw);
		if (enable) {
			RTL_W8(hw, INT_CFG0_8125, RTL_R8(hw, INT_CFG0_8125) | BIT_3);
			RTL_W8(hw, Config5, RTL_R8(hw, Config5) | BIT_0);
		} else {
			RTL_W8(hw, INT_CFG0_8125, RTL_R8(hw, INT_CFG0_8125) & ~BIT_3);
			RTL_W8(hw, Config5, RTL_R8(hw, Config5) & ~BIT_0);
		}
		rtl_disable_cfg9346_write(hw);
		break;
	}
}

static void
rtl_disable_l1_timeout(struct rtl_hw *hw)
{
	rtl_csi_write(hw, 0x890, rtl_csi_read(hw, 0x890) & ~BIT_0);
}

static void
rtl_disable_eee_plus(struct rtl_hw *hw)
{
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_mac_ocp_write(hw, 0xE080, rtl_mac_ocp_read(hw, 0xE080) & ~BIT_1);
		break;

	default:
		/* EEEPlus is not supported */
		break;
	}
}

static void
rtl_hw_clear_timer_int(struct rtl_hw *hw)
{
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		RTL_W32(hw, TIMER_INT0_8125, 0x0000);
		RTL_W32(hw, TIMER_INT1_8125, 0x0000);
		RTL_W32(hw, TIMER_INT2_8125, 0x0000);
		RTL_W32(hw, TIMER_INT3_8125, 0x0000);
		break;
	}
}

static void
rtl_hw_clear_int_miti(struct rtl_hw *hw)
{
	int i;

	switch (hw->HwSuppIntMitiVer) {
	case 3:
	case 6:
		/* IntMITI_0-IntMITI_31 */
		for (i = 0xA00; i < 0xB00; i += 4)
			RTL_W32(hw, i, 0x0000);
		break;
	case 4:
	case 5:
		/* IntMITI_0-IntMITI_15 */
		for (i = 0xA00; i < 0xA80; i += 4)
			RTL_W32(hw, i, 0x0000);

		if (hw->HwSuppIntMitiVer == 5)
			RTL_W8(hw, INT_CFG0_8125, RTL_R8(hw, INT_CFG0_8125) &
			       ~(INT_CFG0_TIMEOUT0_BYPASS_8125 |
			       INT_CFG0_MITIGATION_BYPASS_8125 |
			       INT_CFG0_RDU_BYPASS_8126));
		else
			RTL_W8(hw, INT_CFG0_8125, RTL_R8(hw, INT_CFG0_8125) &
			       ~(INT_CFG0_TIMEOUT0_BYPASS_8125 | INT_CFG0_MITIGATION_BYPASS_8125));

		RTL_W16(hw, INT_CFG1_8125, 0x0000);
		break;
	}
}

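/*
 * One-time MAC configuration applied during device start: reset the NIC,
 * unlock the config registers, disable ASPM clkreq and magic packet, set
 * the TX DMA burst size and interframe gap, then apply a series of
 * chip-specific MAC OCP tweaks (descriptor format, credits, EEE, TCAM)
 * before clearing the timer and interrupt-mitigation registers and
 * relocking the config registers.
 */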
void
rtl_hw_config(struct rtl_hw *hw)
{
	u32 mac_ocp_data;

	/* Set RxConfig to default */
	RTL_W32(hw, RxConfig, (RX_DMA_BURST_unlimited << RxCfgDMAShift));

	rtl_nic_reset(hw);

	rtl_enable_cfg9346_write(hw);

	/* Disable ASPM clkreq internally */
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_enable_force_clkreq(hw, 0);
		rtl_enable_aspm_clkreq_lock(hw, 0);
		break;
	}

	/* Disable magic packet */
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		mac_ocp_data = 0;
		rtl_mac_ocp_write(hw, 0xC0B6, mac_ocp_data);
		break;
	}

	/* Set DMA burst size and interframe gap time */
	RTL_W32(hw, TxConfig, (TX_DMA_BURST_unlimited << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	if (hw->EnableTxNoClose)
		RTL_W32(hw, TxConfig, (RTL_R32(hw, TxConfig) | BIT_6));

	/* TCAM */
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_53:
		RTL_W16(hw, 0x382, 0x221B);
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_disable_l1_timeout(hw);
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:

		/* RSS_control_0 */
		RTL_W32(hw, RSS_CTRL_8125, 0x00);

		/* VMQ_control */
		RTL_W16(hw, Q_NUM_CTRL_8125, 0x0000);

		/* Disable speed down */
		RTL_W8(hw, Config1, RTL_R8(hw, Config1) & ~0x10);

		/* CRC disable set */
		rtl_mac_ocp_write(hw, 0xC140, 0xFFFF);
		rtl_mac_ocp_write(hw, 0xC142, 0xFFFF);

		/* New TX desc format */
		mac_ocp_data = rtl_mac_ocp_read(hw, 0xEB58);
		if (hw->mcfg == CFG_METHOD_70 || hw->mcfg == CFG_METHOD_71)
			mac_ocp_data &= ~(BIT_0 | BIT_1);
		mac_ocp_data |= BIT_0;
		rtl_mac_ocp_write(hw, 0xEB58, mac_ocp_data);

		if (hw->mcfg == CFG_METHOD_70 || hw->mcfg == CFG_METHOD_71)
			RTL_W8(hw, 0xD8, RTL_R8(hw, 0xD8) & ~BIT_1);

		/*
		 * MTPS
		 * 15-8 maximum tx use credit number
		 * 7-0 reserved for pcie product line
		 */
		mac_ocp_data = rtl_mac_ocp_read(hw, 0xE614);
		mac_ocp_data &= ~(BIT_10 | BIT_9 | BIT_8);
		if (hw->mcfg == CFG_METHOD_50 || hw->mcfg == CFG_METHOD_51 ||
		    hw->mcfg == CFG_METHOD_53)
			mac_ocp_data |= ((2 & 0x07) << 8);
		else if (hw->mcfg == CFG_METHOD_69 || hw->mcfg == CFG_METHOD_70 ||
			 hw->mcfg == CFG_METHOD_71)
			mac_ocp_data |= ((4 & 0x07) << 8);
		else
			mac_ocp_data |= ((3 & 0x07) << 8);
		rtl_mac_ocp_write(hw, 0xE614, mac_ocp_data);

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xE63E);
		mac_ocp_data &= ~(BIT_5 | BIT_4);
		if (hw->mcfg == CFG_METHOD_48 || hw->mcfg == CFG_METHOD_49 ||
		    hw->mcfg == CFG_METHOD_52 || hw->mcfg == CFG_METHOD_69 ||
		    hw->mcfg == CFG_METHOD_70 || hw->mcfg == CFG_METHOD_71)
			mac_ocp_data |= ((0x02 & 0x03) << 4);
		rtl_mac_ocp_write(hw, 0xE63E, mac_ocp_data);

		/*
		 * FTR_MCU_CTRL
		 * 3-2 txpla packet valid start
		 */
		mac_ocp_data = rtl_mac_ocp_read(hw, 0xC0B4);
		mac_ocp_data &= ~BIT_0;
		rtl_mac_ocp_write(hw, 0xC0B4, mac_ocp_data);
		mac_ocp_data |= BIT_0;
		rtl_mac_ocp_write(hw, 0xC0B4, mac_ocp_data);

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xC0B4);
		mac_ocp_data |= (BIT_3 | BIT_2);
		rtl_mac_ocp_write(hw, 0xC0B4, mac_ocp_data);

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xEB6A);
		mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 |
				  BIT_0);
		mac_ocp_data |= (BIT_5 | BIT_4 | BIT_1 | BIT_0);
		rtl_mac_ocp_write(hw, 0xEB6A, mac_ocp_data);

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xEB50);
		mac_ocp_data &= ~(BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5);
		mac_ocp_data |= BIT_6;
		rtl_mac_ocp_write(hw, 0xEB50, mac_ocp_data);

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xE056);
		mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4);
		rtl_mac_ocp_write(hw, 0xE056, mac_ocp_data);

		/* EEE_CR */
		mac_ocp_data = rtl_mac_ocp_read(hw, 0xE040);
		mac_ocp_data &= ~BIT_12;
		rtl_mac_ocp_write(hw, 0xE040, mac_ocp_data);

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xEA1C);
		mac_ocp_data &= ~(BIT_1 | BIT_0);
		mac_ocp_data |= BIT_0;
		rtl_mac_ocp_write(hw, 0xEA1C, mac_ocp_data);

		switch (hw->mcfg) {
		case CFG_METHOD_48:
		case CFG_METHOD_49:
		case CFG_METHOD_52:
		case CFG_METHOD_54:
		case CFG_METHOD_55:
			rtl8125_oob_mutex_lock(hw);
			break;
		}

		/* MAC_PWRDWN_CR0 */
		rtl_mac_ocp_write(hw, 0xE0C0, 0x4000);

		rtl_set_mac_ocp_bit(hw, 0xE052, (BIT_6 | BIT_5));
		rtl_clear_mac_ocp_bit(hw, 0xE052, (BIT_3 | BIT_7));

		switch (hw->mcfg) {
		case CFG_METHOD_48:
		case CFG_METHOD_49:
		case CFG_METHOD_52:
		case CFG_METHOD_54:
		case CFG_METHOD_55:
			rtl8125_oob_mutex_unlock(hw);
			break;
		}

		/*
		 * DMY_PWR_REG_0
		 * (1)ERI(0xD4)(OCP 0xC0AC).bit[7:12]=6'b111111, L1 Mask
		 */
		rtl_set_mac_ocp_bit(hw, 0xC0AC,
				    (BIT_7 | BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12));

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xD430);
		mac_ocp_data &= ~(BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 |
				  BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
		mac_ocp_data |= 0x45F;
		rtl_mac_ocp_write(hw, 0xD430, mac_ocp_data);

		if (!hw->DASH)
			RTL_W8(hw, 0xD0, RTL_R8(hw, 0xD0) | BIT_6 | BIT_7);
		else
			RTL_W8(hw, 0xD0, RTL_R8(hw, 0xD0) & ~(BIT_6 | BIT_7));

		if (hw->mcfg == CFG_METHOD_48 || hw->mcfg == CFG_METHOD_49 ||
		    hw->mcfg == CFG_METHOD_52)
			RTL_W8(hw, MCUCmd_reg, RTL_R8(hw, MCUCmd_reg) | BIT_0);

		rtl_disable_eee_plus(hw);

		mac_ocp_data = rtl_mac_ocp_read(hw, 0xEA1C);
		mac_ocp_data &= ~BIT_2;
		if (hw->mcfg == CFG_METHOD_70 || hw->mcfg == CFG_METHOD_71)
			mac_ocp_data &= ~(BIT_9 | BIT_8);
		rtl_mac_ocp_write(hw, 0xEA1C, mac_ocp_data);

		/* Clear TCAM entries */
		rtl_set_mac_ocp_bit(hw, 0xEB54, BIT_0);
		rte_delay_us(1);
		rtl_clear_mac_ocp_bit(hw, 0xEB54, BIT_0);

		RTL_W16(hw, 0x1880, RTL_R16(hw, 0x1880) & ~(BIT_4 | BIT_5));

		switch (hw->mcfg) {
		case CFG_METHOD_54 ... CFG_METHOD_57:
			RTL_W8(hw, 0xd8, RTL_R8(hw, 0xd8) & ~EnableRxDescV4_0);
			break;
		}
	}

	/* Other hw parameters */
	rtl_hw_clear_timer_int(hw);

	rtl_hw_clear_int_miti(hw);

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_mac_ocp_write(hw, 0xE098, 0xC302);
		break;
	}

	rtl_disable_cfg9346_write(hw);

	rte_delay_us(10);
}

int
rtl_set_hw_ops(struct rtl_hw *hw)
{
	switch (hw->mcfg) {
	/* 8125A */
	case CFG_METHOD_48:
	case CFG_METHOD_49:
		hw->hw_ops = rtl8125a_ops;
		return 0;
	/* 8125B */
	case CFG_METHOD_50:
	case CFG_METHOD_51:
		hw->hw_ops = rtl8125b_ops;
		return 0;
	/* 8125BP */
	case CFG_METHOD_54:
	case CFG_METHOD_55:
		hw->hw_ops = rtl8125bp_ops;
		return 0;
	/* 8125D */
	case CFG_METHOD_56:
	case CFG_METHOD_57:
		hw->hw_ops = rtl8125d_ops;
		return 0;
	/* 8126A */
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->hw_ops = rtl8126a_ops;
		return 0;
	default:
		return -ENOTSUP;
	}
}

void
rtl_hw_disable_mac_mcu_bps(struct rtl_hw *hw)
{
	u16 reg_addr;

	rtl_enable_aspm_clkreq_lock(hw, 0);

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_mac_ocp_write(hw, 0xFC48, 0x0000);
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		for (reg_addr = 0xFC28; reg_addr < 0xFC48; reg_addr += 2)
			rtl_mac_ocp_write(hw, reg_addr, 0x0000);

		rte_delay_ms(3);

		rtl_mac_ocp_write(hw, 0xFC26, 0x0000);
		break;
	}
}

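/*
 * MAC MCU patch download. Patch entries are written to the window starting
 * at MAC OCP address 0xF800; on chips with a paged patch RAM the page is
 * selected through bits 1:0 of OCP register 0xE446 and the entries are
 * written page by page.
 */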
static void
rtl_switch_mac_mcu_ram_code_page(struct rtl_hw *hw, u16 page)
{
	u16 tmp_ushort;

	page &= (BIT_1 | BIT_0);
	tmp_ushort = rtl_mac_ocp_read(hw, 0xE446);
	tmp_ushort &= ~(BIT_1 | BIT_0);
	tmp_ushort |= page;
	rtl_mac_ocp_write(hw, 0xE446, tmp_ushort);
}

static void
_rtl_write_mac_mcu_ram_code(struct rtl_hw *hw, const u16 *entry, u16 entry_cnt)
{
	u16 i;

	for (i = 0; i < entry_cnt; i++)
		rtl_mac_ocp_write(hw, 0xF800 + i * 2, entry[i]);
}

static void
_rtl_write_mac_mcu_ram_code_with_page(struct rtl_hw *hw, const u16 *entry,
				      u16 entry_cnt, u16 page_size)
{
	u16 i;
	u16 offset;
	u16 page;

	if (page_size == 0)
		return;

	for (i = 0; i < entry_cnt; i++) {
		offset = i % page_size;
		if (offset == 0) {
			page = (i / page_size);
			rtl_switch_mac_mcu_ram_code_page(hw, page);
		}
		rtl_mac_ocp_write(hw, 0xF800 + offset * 2, entry[i]);
	}
}

void
rtl_write_mac_mcu_ram_code(struct rtl_hw *hw, const u16 *entry, u16 entry_cnt)
{
	if (HW_SUPPORT_MAC_MCU(hw) == FALSE)
		return;
	if (entry == NULL || entry_cnt == 0)
		return;

	if (hw->MacMcuPageSize > 0)
		_rtl_write_mac_mcu_ram_code_with_page(hw, entry, entry_cnt,
						      hw->MacMcuPageSize);
	else
		_rtl_write_mac_mcu_ram_code(hw, entry, entry_cnt);
}

bool
rtl_is_speed_mode_valid(u32 speed)
{
	switch (speed) {
	case SPEED_5000:
	case SPEED_2500:
	case SPEED_1000:
	case SPEED_100:
	case SPEED_10:
		return true;
	default:
		return false;
	}
}

static bool
rtl_is_duplex_mode_valid(u8 duplex)
{
	switch (duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		return true;
	default:
		return false;
	}
}

static bool
rtl_is_autoneg_mode_valid(u32 autoneg)
{
	switch (autoneg) {
	case AUTONEG_ENABLE:
	case AUTONEG_DISABLE:
		return true;
	default:
		return false;
	}
}

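/*
 * Record the requested link settings. Invalid speed, duplex or autoneg
 * values fall back to 5 Gbps, full duplex and autoneg enabled respectively;
 * the speed is then clamped to the chip's maximum, and the advertisement
 * mask is built by falling through from the highest granted rate down to
 * the 10/100/1000M modes.
 */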
void
rtl_set_link_option(struct rtl_hw *hw, u8 autoneg, u32 speed, u8 duplex,
		    enum rtl_fc_mode fc)
{
	u64 adv;

	if (!rtl_is_speed_mode_valid(speed))
		speed = SPEED_5000;

	if (!rtl_is_duplex_mode_valid(duplex))
		duplex = DUPLEX_FULL;

	if (!rtl_is_autoneg_mode_valid(autoneg))
		autoneg = AUTONEG_ENABLE;

	speed = RTE_MIN(speed, hw->HwSuppMaxPhyLinkSpeed);

	adv = 0;
	switch (speed) {
	case SPEED_5000:
		adv |= ADVERTISE_5000_FULL;
	/* Fall through */
	case SPEED_2500:
		adv |= ADVERTISE_2500_FULL;
	/* Fall through */
	default:
		adv |= (ADVERTISE_10_HALF | ADVERTISE_10_FULL |
			ADVERTISE_100_HALF | ADVERTISE_100_FULL |
			ADVERTISE_1000_HALF | ADVERTISE_1000_FULL);
		break;
	}

	hw->autoneg = autoneg;
	hw->speed = speed;
	hw->duplex = duplex;
	hw->advertising = adv;
	hw->fcpause = fc;
}

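/*
 * Derive the per-chip software settings from the detected mcfg: DASH/OCP
 * channel support, chipset name, maximum PHY link speed, TX no-close
 * version and the matching tail/close pointer registers, expected RAM code
 * version, MAC MCU support, interrupt mitigation version, plus the default
 * link options and MTU.
 */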
static void
rtl_init_software_variable(struct rtl_hw *hw)
{
	int tx_no_close_enable = 1;
	unsigned int speed_mode = SPEED_5000;
	unsigned int duplex_mode = DUPLEX_FULL;
	unsigned int autoneg_mode = AUTONEG_ENABLE;
	u8 tmp;

	switch (hw->mcfg) {
	case CFG_METHOD_48:
	case CFG_METHOD_49:
		tmp = (u8)rtl_mac_ocp_read(hw, 0xD006);
		if (tmp == 0x02 || tmp == 0x04)
			hw->HwSuppDashVer = 2;
		break;
	case CFG_METHOD_54:
	case CFG_METHOD_55:
		hw->HwSuppDashVer = 4;
		break;
	default:
		hw->HwSuppDashVer = 0;
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48:
	case CFG_METHOD_49:
		if (HW_DASH_SUPPORT_DASH(hw))
			hw->HwSuppOcpChannelVer = 2;
		break;
	case CFG_METHOD_54:
	case CFG_METHOD_55:
		hw->HwSuppOcpChannelVer = 2;
		break;
	}

	hw->AllowAccessDashOcp = rtl_is_allow_access_dash_ocp(hw);

	if (HW_DASH_SUPPORT_DASH(hw) && rtl_check_dash(hw))
		hw->DASH = 1;
	else
		hw->DASH = 0;

	if (HW_DASH_SUPPORT_TYPE_2(hw))
		hw->cmac_ioaddr = hw->mmio_addr;

	switch (hw->mcfg) {
	case CFG_METHOD_48:
	case CFG_METHOD_49:
		hw->chipset_name = RTL8125A;
		break;
	case CFG_METHOD_50:
	case CFG_METHOD_51:
		hw->chipset_name = RTL8125B;
		break;
	case CFG_METHOD_52:
	case CFG_METHOD_53:
		hw->chipset_name = RTL8168KB;
		break;
	case CFG_METHOD_54:
	case CFG_METHOD_55:
		hw->chipset_name = RTL8125BP;
		break;
	case CFG_METHOD_56:
	case CFG_METHOD_57:
		hw->chipset_name = RTL8125D;
		break;
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->chipset_name = RTL8126A;
		break;
	default:
		hw->chipset_name = UNKNOWN;
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->HwSuppNowIsOobVer = 1;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->HwSuppCheckPhyDisableModeVer = 3;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_51:
	case CFG_METHOD_54 ... CFG_METHOD_57:
		hw->HwSuppMaxPhyLinkSpeed = SPEED_2500;
		break;
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->HwSuppMaxPhyLinkSpeed = SPEED_5000;
		break;
	default:
		hw->HwSuppMaxPhyLinkSpeed = SPEED_1000;
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_53:
		hw->HwSuppTxNoCloseVer = 3;
		break;
	case CFG_METHOD_54 ... CFG_METHOD_57:
		hw->HwSuppTxNoCloseVer = 6;
		break;
	case CFG_METHOD_69:
		hw->HwSuppTxNoCloseVer = 4;
		break;
	case CFG_METHOD_70:
	case CFG_METHOD_71:
		hw->HwSuppTxNoCloseVer = 5;
		break;
	}

	switch (hw->HwSuppTxNoCloseVer) {
	case 5:
	case 6:
		hw->MaxTxDescPtrMask = MAX_TX_NO_CLOSE_DESC_PTR_MASK_V4;
		break;
	case 4:
		hw->MaxTxDescPtrMask = MAX_TX_NO_CLOSE_DESC_PTR_MASK_V3;
		break;
	case 3:
		hw->MaxTxDescPtrMask = MAX_TX_NO_CLOSE_DESC_PTR_MASK_V2;
		break;
	default:
		tx_no_close_enable = 0;
		break;
	}

	if (hw->HwSuppTxNoCloseVer > 0 && tx_no_close_enable == 1)
		hw->EnableTxNoClose = TRUE;

	switch (hw->HwSuppTxNoCloseVer) {
	case 4:
	case 5:
		hw->hw_clo_ptr_reg = HW_CLO_PTR0_8126;
		hw->sw_tail_ptr_reg = SW_TAIL_PTR0_8126;
		break;
	case 6:
		hw->hw_clo_ptr_reg = HW_CLO_PTR0_8125BP;
		hw->sw_tail_ptr_reg = SW_TAIL_PTR0_8125BP;
		break;
	default:
		hw->hw_clo_ptr_reg = HW_CLO_PTR0_8125;
		hw->sw_tail_ptr_reg = SW_TAIL_PTR0_8125;
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_48;
		break;
	case CFG_METHOD_49:
	case CFG_METHOD_52:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_49;
		break;
	case CFG_METHOD_50:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_50;
		break;
	case CFG_METHOD_51:
	case CFG_METHOD_53:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_51;
		break;
	case CFG_METHOD_54:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_54;
		break;
	case CFG_METHOD_55:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_55;
		break;
	case CFG_METHOD_56:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_56;
		break;
	case CFG_METHOD_57:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_57;
		break;
	case CFG_METHOD_69:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_69;
		break;
	case CFG_METHOD_70:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_70;
		break;
	case CFG_METHOD_71:
		hw->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_71;
		break;
	}

	if (hw->HwIcVerUnknown) {
		hw->NotWrRamCodeToMicroP = TRUE;
		hw->NotWrMcuPatchCode = TRUE;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->HwSuppMacMcuVer = 2;
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->MacMcuPageSize = RTL_MAC_MCU_PAGE_SIZE;
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_49:
	case CFG_METHOD_52:
		if ((rtl_mac_ocp_read(hw, 0xD442) & BIT_5) &&
		    (rtl_mdio_direct_read_phy_ocp(hw, 0xD068) & BIT_1))
			hw->RequirePhyMdiSwapPatch = TRUE;
		break;
	}

	switch (hw->mcfg) {
	case CFG_METHOD_48:
	case CFG_METHOD_49:
	case CFG_METHOD_52:
		hw->HwSuppIntMitiVer = 3;
		break;
	case CFG_METHOD_50:
	case CFG_METHOD_51:
	case CFG_METHOD_53:
	case CFG_METHOD_69:
		hw->HwSuppIntMitiVer = 4;
		break;
	case CFG_METHOD_54 ... CFG_METHOD_57:
		hw->HwSuppIntMitiVer = 6;
		break;
	case CFG_METHOD_70:
	case CFG_METHOD_71:
		hw->HwSuppIntMitiVer = 5;
		break;
	}

	rtl_set_link_option(hw, autoneg_mode, speed_mode, duplex_mode, rtl_fc_full);

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		hw->mcu_pme_setting = rtl_mac_ocp_read(hw, 0xE00A);
		break;
	}

	hw->mtu = RTL_DEFAULT_MTU;
}

static void
rtl_exit_realwow(struct rtl_hw *hw)
{
	/* Disable realwow function */
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_mac_ocp_write(hw, 0xC0BC, 0x00FF);
		break;
	}
}

static void
rtl_disable_now_is_oob(struct rtl_hw *hw)
{
	if (hw->HwSuppNowIsOobVer == 1)
		RTL_W8(hw, MCUCmd_reg, RTL_R8(hw, MCUCmd_reg) & ~Now_is_oob);
}

static void
rtl_wait_ll_share_fifo_ready(struct rtl_hw *hw)
{
	int i;

	for (i = 0; i < 10; i++) {
		rte_delay_us(100);
		if (RTL_R16(hw, 0xD2) & BIT_9)
			break;
	}
}

static void
rtl_exit_oob(struct rtl_hw *hw)
{
	u16 data16;

	rtl_disable_rx_packet_filter(hw);

	if (HW_DASH_SUPPORT_DASH(hw)) {
		rtl8125_driver_start(hw);
		rtl8125_dash2_disable_txrx(hw);
	}

	rtl_exit_realwow(hw);

	rtl_nic_reset(hw);

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_disable_now_is_oob(hw);

		data16 = rtl_mac_ocp_read(hw, 0xE8DE) & ~BIT_14;
		rtl_mac_ocp_write(hw, 0xE8DE, data16);
		rtl_wait_ll_share_fifo_ready(hw);

		rtl_mac_ocp_write(hw, 0xC0AA, 0x07D0);

		rtl_mac_ocp_write(hw, 0xC0A6, 0x01B5);

		rtl_mac_ocp_write(hw, 0xC01E, 0x5555);

		rtl_wait_ll_share_fifo_ready(hw);
		break;
	}
}

static void
rtl_disable_ups(struct rtl_hw *hw)
{
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_mac_ocp_write(hw, 0xD40A, rtl_mac_ocp_read(hw, 0xD40A) & ~BIT_4);
		break;
	}
}

static void
rtl8125_disable_ocp_phy_power_saving(struct rtl_hw *hw)
{
	u16 val;

	if (hw->mcfg == CFG_METHOD_48 || hw->mcfg == CFG_METHOD_49 ||
	    hw->mcfg == CFG_METHOD_52) {
		val = rtl_mdio_direct_read_phy_ocp(hw, 0xC416);
		if (val != 0x0050) {
			rtl_set_phy_mcu_patch_request(hw);
			rtl_mdio_direct_write_phy_ocp(hw, 0xC416, 0x0000);
			rtl_mdio_direct_write_phy_ocp(hw, 0xC416, 0x0500);
			rtl_clear_phy_mcu_patch_request(hw);
		}
	}
}

static void
rtl_hw_init(struct rtl_hw *hw)
{
	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		rtl_enable_aspm_clkreq_lock(hw, 0);
		rtl_enable_force_clkreq(hw, 0);
		break;
	}

	rtl_disable_ups(hw);

	hw->hw_ops.hw_mac_mcu_config(hw);

	/* Disable ocp phy power saving */
	rtl8125_disable_ocp_phy_power_saving(hw);
}

void
rtl_hw_initialize(struct rtl_hw *hw)
{
	rtl_init_software_variable(hw);

	rtl_exit_oob(hw);

	rtl_hw_init(hw);

	rtl_nic_reset(hw);
}

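/*
 * Identify the chip from TxConfig: the bits selected by 0x7c800000 encode
 * the chip family and the bits selected by 0x00700000 the revision within
 * it. Unknown revisions are mapped to the newest known one and flagged via
 * HwIcVerUnknown. PCI device ID 0x8162 remaps the affected variants to the
 * RTL8168KB configurations (CFG_METHOD_52/53).
 */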
void
rtl_get_mac_version(struct rtl_hw *hw, struct rte_pci_device *pci_dev)
{
	u32 reg, val32;
	u32 ic_version_id;

	val32 = RTL_R32(hw, TxConfig);
	reg = val32 & 0x7c800000;
	ic_version_id = val32 & 0x00700000;

	switch (reg) {
	case 0x60800000:
		if (ic_version_id == 0x00000000) {
			hw->mcfg = CFG_METHOD_48;
		} else if (ic_version_id == 0x100000) {
			hw->mcfg = CFG_METHOD_49;
		} else {
			hw->mcfg = CFG_METHOD_49;
			hw->HwIcVerUnknown = TRUE;
		}

		hw->efuse_ver = EFUSE_SUPPORT_V4;
		break;
	case 0x64000000:
		if (ic_version_id == 0x00000000) {
			hw->mcfg = CFG_METHOD_50;
		} else if (ic_version_id == 0x100000) {
			hw->mcfg = CFG_METHOD_51;
		} else {
			hw->mcfg = CFG_METHOD_51;
			hw->HwIcVerUnknown = TRUE;
		}

		hw->efuse_ver = EFUSE_SUPPORT_V4;
		break;
	case 0x68000000:
		if (ic_version_id == 0x00000000) {
			hw->mcfg = CFG_METHOD_54;
		} else if (ic_version_id == 0x100000) {
			hw->mcfg = CFG_METHOD_55;
		} else {
			hw->mcfg = CFG_METHOD_55;
			hw->HwIcVerUnknown = TRUE;
		}

		hw->efuse_ver = EFUSE_SUPPORT_V4;
		break;
	case 0x68800000:
		if (ic_version_id == 0x00000000) {
			hw->mcfg = CFG_METHOD_56;
		} else if (ic_version_id == 0x100000) {
			hw->mcfg = CFG_METHOD_57;
		} else {
			hw->mcfg = CFG_METHOD_57;
			hw->HwIcVerUnknown = TRUE;
		}

		hw->efuse_ver = EFUSE_SUPPORT_V4;
		break;
	case 0x64800000:
		if (ic_version_id == 0x00000000) {
			hw->mcfg = CFG_METHOD_69;
		} else if (ic_version_id == 0x100000) {
			hw->mcfg = CFG_METHOD_70;
		} else if (ic_version_id == 0x200000) {
			hw->mcfg = CFG_METHOD_71;
		} else {
			hw->mcfg = CFG_METHOD_71;
			hw->HwIcVerUnknown = TRUE;
		}

		hw->efuse_ver = EFUSE_SUPPORT_V4;
		break;
	default:
		PMD_INIT_LOG(NOTICE, "unknown chip version (%x)", reg);
		hw->mcfg = CFG_METHOD_DEFAULT;
		hw->HwIcVerUnknown = TRUE;
		hw->efuse_ver = EFUSE_NOT_SUPPORT;
		break;
	}

	if (pci_dev->id.device_id == 0x8162) {
		if (hw->mcfg == CFG_METHOD_49)
			hw->mcfg = CFG_METHOD_52;
		else if (hw->mcfg == CFG_METHOD_51)
			hw->mcfg = CFG_METHOD_53;
	}
}

int
rtl_get_mac_address(struct rtl_hw *hw, struct rte_ether_addr *ea)
{
	u8 mac_addr[MAC_ADDR_LEN];

	switch (hw->mcfg) {
	case CFG_METHOD_48 ... CFG_METHOD_57:
	case CFG_METHOD_69 ... CFG_METHOD_71:
		*(u32 *)&mac_addr[0] = RTL_R32(hw, BACKUP_ADDR0_8125);
		*(u16 *)&mac_addr[4] = RTL_R16(hw, BACKUP_ADDR1_8125);
		break;
	default:
		break;
	}

	rte_ether_addr_copy((struct rte_ether_addr *)mac_addr, ea);

	return 0;
}

void
rtl_rar_set(struct rtl_hw *hw, uint8_t *addr)
{
	uint32_t rar_low = 0;
	uint32_t rar_high = 0;

	rar_low = ((uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24));

	rar_high = ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));

	rtl_enable_cfg9346_write(hw);

	RTL_W32(hw, MAC0, rar_low);
	RTL_W32(hw, MAC4, rar_high);

	rtl_disable_cfg9346_write(hw);
}

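/*
 * Hardware tally counters. The NIC DMAs a snapshot of its counters into the
 * buffer whose address is programmed in CounterAddrHigh/Low; setting
 * CounterDump triggers the dump and the bit is polled until the hardware
 * clears it, after which the counters are folded into the rte_eth_stats.
 */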
void
rtl_get_tally_stats(struct rtl_hw *hw, struct rte_eth_stats *rte_stats)
{
	struct rtl_counters *counters;
	uint64_t paddr;
	u32 cmd;
	u32 wait_cnt;

	counters = hw->tally_vaddr;
	paddr = hw->tally_paddr;
	if (!counters)
		return;

	RTL_W32(hw, CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(hw, CounterAddrLow, cmd);
	RTL_W32(hw, CounterAddrLow, cmd | CounterDump);

	wait_cnt = 0;
	while (RTL_R32(hw, CounterAddrLow) & CounterDump) {
		rte_delay_us(10);

		wait_cnt++;
		if (wait_cnt > 20)
			break;
	}

	/* RX errors */
	rte_stats->imissed = rte_le_to_cpu_64(counters->rx_missed);
	rte_stats->ierrors = rte_le_to_cpu_64(counters->rx_errors);

	/* TX errors */
	rte_stats->oerrors = rte_le_to_cpu_64(counters->tx_errors);

	rte_stats->ipackets = rte_le_to_cpu_64(counters->rx_packets);
	rte_stats->opackets = rte_le_to_cpu_64(counters->tx_packets);
}

void
rtl_clear_tally_stats(struct rtl_hw *hw)
{
	if (!hw->tally_paddr)
		return;

	RTL_W32(hw, CounterAddrHigh, (u64)hw->tally_paddr >> 32);
	RTL_W32(hw, CounterAddrLow,
		((u64)hw->tally_paddr & (DMA_BIT_MASK(32))) | CounterReset);
}

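/*
 * Reserve a DMA-able memzone for the tally block (64-byte aligned), program
 * its address into CounterAddrHigh/Low and reset the hardware statistics.
 * The zone is released again in rtl_tally_free().
 */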
int
rtl_tally_init(struct rte_eth_dev *dev)
{
	struct rtl_adapter *adapter = RTL_DEV_PRIVATE(dev);
	struct rtl_hw *hw = &adapter->hw;
	const struct rte_memzone *mz;

	mz = rte_eth_dma_zone_reserve(dev, "tally_counters", 0,
				      sizeof(struct rtl_counters), 64, rte_socket_id());
	if (mz == NULL)
		return -ENOMEM;

	hw->tally_vaddr = mz->addr;
	hw->tally_paddr = mz->iova;

	/* Fill tally addrs */
	RTL_W32(hw, CounterAddrHigh, (u64)hw->tally_paddr >> 32);
	RTL_W32(hw, CounterAddrLow, (u64)hw->tally_paddr & (DMA_BIT_MASK(32)));

	/* Reset the hw statistics */
	rtl_clear_tally_stats(hw);

	return 0;
}

void
rtl_tally_free(struct rte_eth_dev *dev)
{
	rte_eth_dma_zone_free(dev, "tally_counters", 0);
}