/* $NetBSD: radeon_r520.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $ */

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_r520.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");

#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"
/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */

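/*
 * Poll MC_STATUS until the memory controller reports idle, waiting one
 * microsecond per iteration for up to rdev->usec_timeout iterations.
 * Returns 0 once idle, -1 on timeout.
 */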
int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(R520_MC_STATUS);
		if (tmp & R520_MC_STATUS_IDLE) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

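/*
 * Set up the GPU pipe configuration: disable VGA rendering, apply the
 * RV530 GB_FIFO_SIZE2 workaround, initialize the pipes, then derive a
 * value from DST_PIPE_CONFIG and GB_PIPE_SELECT and write it to PLL
 * register 0x000D, waiting for the MC to go idle afterwards.
 */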
static void r520_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	rv515_vga_render_disable(rdev);
	/*
	 * DST_PIPE_CONFIG		0x170C
	 * GB_TILE_CONFIG		0x4018
	 * GB_FIFO_SIZE			0x4024
	 * GB_PIPE_SELECT		0x402C
	 * GB_PIPE_SELECT2		0x4124
	 *	Z_PIPE_SHIFT		0
	 *	Z_PIPE_MASK		0x000000003
	 * GB_FIFO_SIZE2		0x4128
	 *	SC_SFIFO_SIZE_SHIFT	0
	 *	SC_SFIFO_SIZE_MASK	0x000000003
	 *	SC_MFIFO_SIZE_SHIFT	2
	 *	SC_MFIFO_SIZE_MASK	0x00000000C
	 *	FG_SFIFO_SIZE_SHIFT	4
	 *	FG_SFIFO_SIZE_MASK	0x000000030
	 *	ZB_MFIFO_SIZE_SHIFT	6
	 *	ZB_MFIFO_SIZE_MASK	0x0000000C0
	 * GA_ENHANCE			0x4274
	 * SU_REG_DEST			0x42C8
	 */
	/* workaround for RV530 */
	if (rdev->family == CHIP_RV530) {
		WREG32(0x4128, 0xFF);
	}
	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r520_mc_wait_for_idle(rdev)) {
		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
	}
}

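/*
 * Determine the VRAM configuration from MC_CNTL0: the number of memory
 * channels selects a bus width of 32/64/128/256 bits (defaulting to
 * 128), doubled when R520_MC_CHANNEL_SIZE is set.  VRAM is always
 * treated as DDR on these chips.
 */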
static void r520_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(R520_MC_CNTL0);
	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
	case 0:
		rdev->mc.vram_width = 32;
		break;
	case 1:
		rdev->mc.vram_width = 64;
		break;
	case 2:
		rdev->mc.vram_width = 128;
		break;
	case 3:
		rdev->mc.vram_width = 256;
		break;
	default:
		rdev->mc.vram_width = 128;
		break;
	}
	if (tmp & R520_MC_CHANNEL_SIZE)
		rdev->mc.vram_width *= 2;
}

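/*
 * Initialize the memory controller bookkeeping: detect the VRAM type
 * and width, compute the VRAM sizes, and place the VRAM and (non-AGP)
 * GTT apertures in the GPU address space.
 */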
static void r520_mc_init(struct radeon_device *rdev)
{

	r520_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	radeon_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

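/*
 * Program the memory controller registers with the chosen VRAM and AGP
 * layout.  MC clients are stopped around the update and resumed once
 * the new addresses are in place.
 */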
static void r520_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for MC idle */
	if (r520_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
	/* Write VRAM size in case we are limiting it */
	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
	/* Program MC; this should be a 32-bit limited address space */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32_MC(R_000005_MC_AGP_LOCATION,
			  S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			  S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32_MC(R_000007_AGP_BASE_2,
			  S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
	} else {
		WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
		WREG32_MC(R_000006_AGP_BASE, 0);
		WREG32_MC(R_000007_AGP_BASE_2, 0);
	}

	rv515_mc_resume(rdev, &save);
}

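/*
 * Bring the GPU up: program the MC, restart clocks, configure the
 * pipes, enable the PCIE GART if present, and initialize writeback,
 * fences, interrupts, the CP ring, and the IB pool.  Returns 0 on
 * success or a negative error code from the step that failed.
 */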
static int r520_startup(struct radeon_device *rdev)
{
	int r;

	r520_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r520_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

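/*
 * Resume from suspend: disable the PCIE GART, reset the GPU before
 * posting (otherwise ATOM can enter an infinite loop), re-post via the
 * ATOM BIOS, and run the common startup sequence.
 */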
int r520_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r520_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

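/*
 * One-time driver initialization: read and validate the ATOM BIOS,
 * reset and post the card if needed, then set up clocks, AGP, the
 * memory controller, fences, the memory manager, and the GART before
 * attempting to start acceleration.  On startup failure the
 * acceleration paths are torn down again and the device is left
 * running unaccelerated.
 */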
int r520_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	if (!radeon_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r520_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rv370_pcie_gart_init(rdev);
	if (r)
		return r;
	rv515_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r520_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv370_pcie_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}