/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"

#include "nbio/nbio_6_1_default.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "vega10_enum.h"

#define smnCPM_CONTROL          0x11180460
#define smnPCIE_CNTL2           0x11180070
#define smnPCIE_CONFIG_CNTL     0x11180044

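/* Extract the ATI revision ID from the DEV0_EPF0 strap register */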
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
        u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

        tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
        tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

        return tmp;
}

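/* Enable or disable framebuffer (VRAM) read/write access through the BIF */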
static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
        if (enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
                             BIF_FB_EN__FB_READ_EN_MASK |
                             BIF_FB_EN__FB_WRITE_EN_MASK);
        else
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

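/*
 * Flush the HDP write cache; use the ring's wreg packet when a ring is
 * available so the flush is ordered with in-flight commands, otherwise
 * fall back to a direct MMIO write.
 */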
static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
                                struct amdgpu_ring *ring)
{
        if (!ring || !ring->funcs->emit_wreg)
                WREG32_SOC15_NO_KIQ(NBIO, 0,
                                    mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
                                    0);
        else
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                        NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}

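/* Framebuffer size, in MB, as reported by the RCC config register */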
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
        return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}

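/*
 * Program the doorbell range for one SDMA engine.  The SDMA0 and SDMA1
 * range registers share a layout, so the BIF_SDMA0_DOORBELL_RANGE fields
 * are used for both instances.
 */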
static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
                                          bool use_doorbell, int doorbell_index)
{
        u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
                                  SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);

        u32 doorbell_range = RREG32(reg);

        if (use_doorbell) {
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
        } else
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

        WREG32(reg, doorbell_range);
}

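/* Master switch for the PF doorbell aperture */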
static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
                                               bool enable)
{
        WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

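/*
 * Configure the self-ring aperture so the GPU can ring its own doorbells
 * through the guest physical address space.
 */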
static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
                                                        bool enable)
{
        u32 tmp = 0;

        if (enable) {
                tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
                      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
                      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

                WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
                             lower_32_bits(adev->doorbell.base));
                WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
                             upper_32_bits(adev->doorbell.base));
        }

        WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

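/* Program the doorbell range used by the interrupt handler (IH) ring */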
static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
                                        bool use_doorbell, int doorbell_index)
{
        u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

        if (use_doorbell) {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
        } else
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

        WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

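/* Basic BIF interrupt setup: dummy-read page address and snoop policy */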
static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
        u32 interrupt_cntl;

        /* setup interrupt control */
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
        interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
        /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
         * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
         */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
        /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

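/* Enable/disable BIF medium grain clock gating via the CPM clock gates */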
static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnCPM_CONTROL);
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
                data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
                         CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
        } else {
                data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
                          CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
        }

        if (def != data)
                WREG32_PCIE(smnCPM_CONTROL, data);
}

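/* Enable/disable light sleep for the PCIe slave/master/replay memories */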
static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                      bool enable)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CNTL2);
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
                data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                         PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                         PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        } else {
                data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                          PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                          PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        }

        if (def != data)
                WREG32_PCIE(smnPCIE_CNTL2, data);
}

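/* Report which BIF clockgating features are currently active */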
static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
                                            u32 *flags)
{
        int data;

        /* AMD_CG_SUPPORT_BIF_MGCG */
        data = RREG32_PCIE(smnCPM_CONTROL);
        if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_MGCG;

        /* AMD_CG_SUPPORT_BIF_LS */
        data = RREG32_PCIE(smnPCIE_CNTL2);
        if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_LS;
}

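/*
 * Register offsets handed to the common amdgpu code, which uses them for
 * HDP flush polling and indirect PCIE register access without knowing
 * the NBIO version.
 */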
static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

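/* Per-client ref/mask pairs used when waiting on GPU_HDP_FLUSH_REQ/DONE */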
static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
        .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
        .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
        .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
        .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
        .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
        .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
        .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
        .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
        .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};

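/*
 * Detect virtualization from the IOV function identifier: bit 0 marks a
 * virtual function, bit 31 means SR-IOV is enabled; a zero reading inside
 * a VM means passthrough.
 */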
static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
        uint32_t reg;

        reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

        if (!reg) {
                if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }
}

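/* One-time init of PCIE_CONFIG_CNTL (SWUS max read request size handling) */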
static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
        data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
        data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

        if (def != data)
                WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}

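/* NBIO 6.1 callbacks plugged into the common SOC15 NBIO abstraction */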
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
        .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
        .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
        .get_rev_id = nbio_v6_1_get_rev_id,
        .mc_access_enable = nbio_v6_1_mc_access_enable,
        .hdp_flush = nbio_v6_1_hdp_flush,
        .get_memsize = nbio_v6_1_get_memsize,
        .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
        .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
        .enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
        .ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
        .update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
        .update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
        .get_clockgating_state = nbio_v6_1_get_clockgating_state,
        .ih_control = nbio_v6_1_ih_control,
        .init_registers = nbio_v6_1_init_registers,
        .detect_hw_virt = nbio_v6_1_detect_hw_virt,
};