xref: /openbsd-src/sys/dev/pci/drm/radeon/radeon_device.c (revision 33a3edb17759836de63cc1c20bcbaf9911c431fc)
11099013bSjsg /*
21099013bSjsg  * Copyright 2008 Advanced Micro Devices, Inc.
31099013bSjsg  * Copyright 2008 Red Hat Inc.
41099013bSjsg  * Copyright 2009 Jerome Glisse.
51099013bSjsg  *
61099013bSjsg  * Permission is hereby granted, free of charge, to any person obtaining a
71099013bSjsg  * copy of this software and associated documentation files (the "Software"),
81099013bSjsg  * to deal in the Software without restriction, including without limitation
91099013bSjsg  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
101099013bSjsg  * and/or sell copies of the Software, and to permit persons to whom the
111099013bSjsg  * Software is furnished to do so, subject to the following conditions:
121099013bSjsg  *
131099013bSjsg  * The above copyright notice and this permission notice shall be included in
141099013bSjsg  * all copies or substantial portions of the Software.
151099013bSjsg  *
161099013bSjsg  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
171099013bSjsg  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
181099013bSjsg  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
191099013bSjsg  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
201099013bSjsg  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
211099013bSjsg  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
221099013bSjsg  * OTHER DEALINGS IN THE SOFTWARE.
231099013bSjsg  *
241099013bSjsg  * Authors: Dave Airlie
251099013bSjsg  *          Alex Deucher
261099013bSjsg  *          Jerome Glisse
271099013bSjsg  */
28c349dbc7Sjsg 
297f4dd379Sjsg #include <linux/console.h>
307f4dd379Sjsg #include <linux/efi.h>
31c349dbc7Sjsg #include <linux/pci.h>
32c349dbc7Sjsg #include <linux/pm_runtime.h>
33c349dbc7Sjsg #include <linux/slab.h>
34c349dbc7Sjsg #include <linux/vga_switcheroo.h>
35c349dbc7Sjsg #include <linux/vgaarb.h>
36c349dbc7Sjsg 
37c349dbc7Sjsg #include <drm/drm_cache.h>
38c349dbc7Sjsg #include <drm/drm_crtc_helper.h>
39c349dbc7Sjsg #include <drm/drm_device.h>
40c349dbc7Sjsg #include <drm/drm_file.h>
411bb76ff1Sjsg #include <drm/drm_framebuffer.h>
42c349dbc7Sjsg #include <drm/drm_probe_helper.h>
43c349dbc7Sjsg #include <drm/radeon_drm.h>
44c349dbc7Sjsg 
455ca02815Sjsg #include "radeon_device.h"
461099013bSjsg #include "radeon_reg.h"
471099013bSjsg #include "radeon.h"
481099013bSjsg #include "atom.h"
491099013bSjsg 
501099013bSjsg static const char radeon_family_name[][16] = {
511099013bSjsg 	"R100",
521099013bSjsg 	"RV100",
531099013bSjsg 	"RS100",
541099013bSjsg 	"RV200",
551099013bSjsg 	"RS200",
561099013bSjsg 	"R200",
571099013bSjsg 	"RV250",
581099013bSjsg 	"RS300",
591099013bSjsg 	"RV280",
601099013bSjsg 	"R300",
611099013bSjsg 	"R350",
621099013bSjsg 	"RV350",
631099013bSjsg 	"RV380",
641099013bSjsg 	"R420",
651099013bSjsg 	"R423",
661099013bSjsg 	"RV410",
671099013bSjsg 	"RS400",
681099013bSjsg 	"RS480",
691099013bSjsg 	"RS600",
701099013bSjsg 	"RS690",
711099013bSjsg 	"RS740",
721099013bSjsg 	"RV515",
731099013bSjsg 	"R520",
741099013bSjsg 	"RV530",
751099013bSjsg 	"RV560",
761099013bSjsg 	"RV570",
771099013bSjsg 	"R580",
781099013bSjsg 	"R600",
791099013bSjsg 	"RV610",
801099013bSjsg 	"RV630",
811099013bSjsg 	"RV670",
821099013bSjsg 	"RV620",
831099013bSjsg 	"RV635",
841099013bSjsg 	"RS780",
851099013bSjsg 	"RS880",
861099013bSjsg 	"RV770",
871099013bSjsg 	"RV730",
881099013bSjsg 	"RV710",
891099013bSjsg 	"RV740",
901099013bSjsg 	"CEDAR",
911099013bSjsg 	"REDWOOD",
921099013bSjsg 	"JUNIPER",
931099013bSjsg 	"CYPRESS",
941099013bSjsg 	"HEMLOCK",
951099013bSjsg 	"PALM",
961099013bSjsg 	"SUMO",
971099013bSjsg 	"SUMO2",
981099013bSjsg 	"BARTS",
991099013bSjsg 	"TURKS",
1001099013bSjsg 	"CAICOS",
1011099013bSjsg 	"CAYMAN",
1021099013bSjsg 	"ARUBA",
1031099013bSjsg 	"TAHITI",
1041099013bSjsg 	"PITCAIRN",
1051099013bSjsg 	"VERDE",
1067ccd5a2cSjsg 	"OLAND",
1077ccd5a2cSjsg 	"HAINAN",
1087ccd5a2cSjsg 	"BONAIRE",
1097ccd5a2cSjsg 	"KAVERI",
1107ccd5a2cSjsg 	"KABINI",
1117ccd5a2cSjsg 	"HAWAII",
1127ccd5a2cSjsg 	"MULLINS",
1131099013bSjsg 	"LAST",
1141099013bSjsg };
1151099013bSjsg 
1167f4dd379Sjsg #if defined(CONFIG_VGA_SWITCHEROO)
1177f4dd379Sjsg bool radeon_has_atpx_dgpu_power_cntl(void);
1187f4dd379Sjsg bool radeon_is_atpx_hybrid(void);
1197f4dd379Sjsg #else
1207f4dd379Sjsg static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
1217f4dd379Sjsg static inline bool radeon_is_atpx_hybrid(void) { return false; }
1227f4dd379Sjsg #endif
1237f4dd379Sjsg 
1247ccd5a2cSjsg #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
1257ccd5a2cSjsg 
1267ccd5a2cSjsg struct radeon_px_quirk {
1277ccd5a2cSjsg 	u32 chip_vendor;
1287ccd5a2cSjsg 	u32 chip_device;
1297ccd5a2cSjsg 	u32 subsys_vendor;
1307ccd5a2cSjsg 	u32 subsys_device;
1317ccd5a2cSjsg 	u32 px_quirk_flags;
1327ccd5a2cSjsg };
1337ccd5a2cSjsg 
1347ccd5a2cSjsg static struct radeon_px_quirk radeon_px_quirk_list[] = {
1357ccd5a2cSjsg 	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
1367ccd5a2cSjsg 	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
1377ccd5a2cSjsg 	 */
1387ccd5a2cSjsg 	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
1397ccd5a2cSjsg 	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
1407ccd5a2cSjsg 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
1417ccd5a2cSjsg 	 */
1427ccd5a2cSjsg 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
1437ccd5a2cSjsg 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
1447ccd5a2cSjsg 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
1457ccd5a2cSjsg 	 */
1467ccd5a2cSjsg 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
1477ccd5a2cSjsg 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
1487ccd5a2cSjsg 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
1497ccd5a2cSjsg 	 */
1507ccd5a2cSjsg 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
1517f4dd379Sjsg 	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
1527f4dd379Sjsg 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
1537f4dd379Sjsg 	 */
1547f4dd379Sjsg 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
1557ccd5a2cSjsg 	{ 0, 0, 0, 0, 0 },
1567ccd5a2cSjsg };
1577ccd5a2cSjsg 
1587ccd5a2cSjsg bool radeon_is_px(struct drm_device *dev)
1597ccd5a2cSjsg {
1607ccd5a2cSjsg 	struct radeon_device *rdev = dev->dev_private;
1617ccd5a2cSjsg 
1627ccd5a2cSjsg 	if (rdev->flags & RADEON_IS_PX)
1637ccd5a2cSjsg 		return true;
1647ccd5a2cSjsg 	return false;
1657ccd5a2cSjsg }
1667ccd5a2cSjsg 
1677ccd5a2cSjsg static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
1687ccd5a2cSjsg {
1697ccd5a2cSjsg 	struct radeon_px_quirk *p = radeon_px_quirk_list;
1707ccd5a2cSjsg 
1717ccd5a2cSjsg 	/* Apply PX quirks */
1727ccd5a2cSjsg 	while (p && p->chip_device != 0) {
1737ccd5a2cSjsg 		if (rdev->pdev->vendor == p->chip_vendor &&
1747ccd5a2cSjsg 		    rdev->pdev->device == p->chip_device &&
1757ccd5a2cSjsg 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1767ccd5a2cSjsg 		    rdev->pdev->subsystem_device == p->subsys_device) {
1777ccd5a2cSjsg 			rdev->px_quirk_flags = p->px_quirk_flags;
1787ccd5a2cSjsg 			break;
1797ccd5a2cSjsg 		}
1807ccd5a2cSjsg 		++p;
1817ccd5a2cSjsg 	}
1827ccd5a2cSjsg 
1837ccd5a2cSjsg 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
1847ccd5a2cSjsg 		rdev->flags &= ~RADEON_IS_PX;
1857f4dd379Sjsg 
1867f4dd379Sjsg 	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
1877f4dd379Sjsg 	if (!radeon_is_atpx_hybrid() &&
1887f4dd379Sjsg 	    !radeon_has_atpx_dgpu_power_cntl())
1897f4dd379Sjsg 		rdev->flags &= ~RADEON_IS_PX;
1907ccd5a2cSjsg }
1917ccd5a2cSjsg 
1927ccd5a2cSjsg /**
1937ccd5a2cSjsg  * radeon_program_register_sequence - program an array of registers.
1947ccd5a2cSjsg  *
1957ccd5a2cSjsg  * @rdev: radeon_device pointer
1967ccd5a2cSjsg  * @registers: pointer to the register array
1977ccd5a2cSjsg  * @array_size: size of the register array
1987ccd5a2cSjsg  *
1997ccd5a2cSjsg  * Programs an array of registers with AND and OR masks.
2007ccd5a2cSjsg  * This is a helper for setting golden registers.
2017ccd5a2cSjsg  */
2027ccd5a2cSjsg void radeon_program_register_sequence(struct radeon_device *rdev,
2037ccd5a2cSjsg 				      const u32 *registers,
2047ccd5a2cSjsg 				      const u32 array_size)
2057ccd5a2cSjsg {
2067ccd5a2cSjsg 	u32 tmp, reg, and_mask, or_mask;
2077ccd5a2cSjsg 	int i;
2087ccd5a2cSjsg 
2097ccd5a2cSjsg 	if (array_size % 3)
2107ccd5a2cSjsg 		return;
2117ccd5a2cSjsg 
2127ccd5a2cSjsg 	for (i = 0; i < array_size; i += 3) {
2137ccd5a2cSjsg 		reg = registers[i + 0];
2147ccd5a2cSjsg 		and_mask = registers[i + 1];
2157ccd5a2cSjsg 		or_mask = registers[i + 2];
2167ccd5a2cSjsg 
2177ccd5a2cSjsg 		if (and_mask == 0xffffffff) {
2187ccd5a2cSjsg 			tmp = or_mask;
2197ccd5a2cSjsg 		} else {
2207ccd5a2cSjsg 			tmp = RREG32(reg);
2217ccd5a2cSjsg 			tmp &= ~and_mask;
2227ccd5a2cSjsg 			tmp |= or_mask;
2237ccd5a2cSjsg 		}
2247ccd5a2cSjsg 		WREG32(reg, tmp);
2257ccd5a2cSjsg 	}
2267ccd5a2cSjsg }
2277ccd5a2cSjsg 
2287ccd5a2cSjsg void radeon_pci_config_reset(struct radeon_device *rdev)
2297ccd5a2cSjsg {
2307ccd5a2cSjsg 	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
2317ccd5a2cSjsg }
2327ccd5a2cSjsg 
2331099013bSjsg /**
2341099013bSjsg  * radeon_surface_init - Clear GPU surface registers.
2351099013bSjsg  *
2361099013bSjsg  * @rdev: radeon_device pointer
2371099013bSjsg  *
2381099013bSjsg  * Clear GPU surface registers (r1xx-r5xx).
2391099013bSjsg  */
2401099013bSjsg void radeon_surface_init(struct radeon_device *rdev)
2411099013bSjsg {
2421099013bSjsg 	/* FIXME: check this out */
2431099013bSjsg 	if (rdev->family < CHIP_R600) {
2441099013bSjsg 		int i;
2451099013bSjsg 
2461099013bSjsg 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
2471099013bSjsg 			if (rdev->surface_regs[i].bo)
2481099013bSjsg 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
2491099013bSjsg 			else
2501099013bSjsg 				radeon_clear_surface_reg(rdev, i);
2511099013bSjsg 		}
2521099013bSjsg 		/* enable surfaces */
2531099013bSjsg 		WREG32(RADEON_SURFACE_CNTL, 0);
2541099013bSjsg 	}
2551099013bSjsg }
2561099013bSjsg 
2571099013bSjsg /*
2581099013bSjsg  * GPU scratch register helper functions.
2591099013bSjsg  */
2601099013bSjsg /**
2611099013bSjsg  * radeon_scratch_init - Init scratch register driver information.
2621099013bSjsg  *
2631099013bSjsg  * @rdev: radeon_device pointer
2641099013bSjsg  *
2651099013bSjsg  * Init CP scratch register driver information (r1xx-r5xx)
2661099013bSjsg  */
2671099013bSjsg void radeon_scratch_init(struct radeon_device *rdev)
2681099013bSjsg {
2691099013bSjsg 	int i;
2701099013bSjsg 
2711099013bSjsg 	/* FIXME: check this out */
2721099013bSjsg 	if (rdev->family < CHIP_R300) {
2731099013bSjsg 		rdev->scratch.num_reg = 5;
2741099013bSjsg 	} else {
2751099013bSjsg 		rdev->scratch.num_reg = 7;
2761099013bSjsg 	}
2771099013bSjsg 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
2781099013bSjsg 	for (i = 0; i < rdev->scratch.num_reg; i++) {
2791099013bSjsg 		rdev->scratch.free[i] = true;
2801099013bSjsg 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2811099013bSjsg 	}
2821099013bSjsg }
2831099013bSjsg 
2841099013bSjsg /**
2851099013bSjsg  * radeon_scratch_get - Allocate a scratch register
2861099013bSjsg  *
2871099013bSjsg  * @rdev: radeon_device pointer
2881099013bSjsg  * @reg: scratch register mmio offset
2891099013bSjsg  *
2901099013bSjsg  * Allocate a CP scratch register for use by the driver (all asics).
2911099013bSjsg  * Returns 0 on success or -EINVAL on failure.
2921099013bSjsg  */
2931099013bSjsg int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
2941099013bSjsg {
2951099013bSjsg 	int i;
2961099013bSjsg 
2971099013bSjsg 	for (i = 0; i < rdev->scratch.num_reg; i++) {
2981099013bSjsg 		if (rdev->scratch.free[i]) {
2991099013bSjsg 			rdev->scratch.free[i] = false;
3001099013bSjsg 			*reg = rdev->scratch.reg[i];
3011099013bSjsg 			return 0;
3021099013bSjsg 		}
3031099013bSjsg 	}
3041099013bSjsg 	return -EINVAL;
3051099013bSjsg }
3061099013bSjsg 
3071099013bSjsg /**
3081099013bSjsg  * radeon_scratch_free - Free a scratch register
3091099013bSjsg  *
3101099013bSjsg  * @rdev: radeon_device pointer
3111099013bSjsg  * @reg: scratch register mmio offset
3121099013bSjsg  *
3131099013bSjsg  * Free a CP scratch register allocated for use by the driver (all asics)
3141099013bSjsg  */
3151099013bSjsg void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
3161099013bSjsg {
3171099013bSjsg 	int i;
3181099013bSjsg 
3191099013bSjsg 	for (i = 0; i < rdev->scratch.num_reg; i++) {
3201099013bSjsg 		if (rdev->scratch.reg[i] == reg) {
3211099013bSjsg 			rdev->scratch.free[i] = true;
3221099013bSjsg 			return;
3231099013bSjsg 		}
3241099013bSjsg 	}
3251099013bSjsg }
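/*
 * Typical usage sketch (the 0xCAFEDEAD value is only an example): allocate a
 * scratch register, submit work that writes a known value to it, poll the
 * register, and hand it back when done.
 *
 *	uint32_t scratch;
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		// ... submit work, then poll with RREG32(scratch) ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */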
3261099013bSjsg 
3271099013bSjsg /*
3287ccd5a2cSjsg  * GPU doorbell aperture helper functions.
3297ccd5a2cSjsg  */
3307ccd5a2cSjsg /**
3317ccd5a2cSjsg  * radeon_doorbell_init - Init doorbell driver information.
3327ccd5a2cSjsg  *
3337ccd5a2cSjsg  * @rdev: radeon_device pointer
3347ccd5a2cSjsg  *
3357ccd5a2cSjsg  * Init doorbell driver information (CIK)
3367ccd5a2cSjsg  * Returns 0 on success, error on failure.
3377ccd5a2cSjsg  */
3387ccd5a2cSjsg static int radeon_doorbell_init(struct radeon_device *rdev)
3397ccd5a2cSjsg {
3407ccd5a2cSjsg 	/* doorbell bar mapping */
3417ccd5a2cSjsg #ifdef __linux__
3427ccd5a2cSjsg 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
3437ccd5a2cSjsg 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
3447ccd5a2cSjsg #endif
3457ccd5a2cSjsg 
3467ccd5a2cSjsg 	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
3477ccd5a2cSjsg 	if (rdev->doorbell.num_doorbells == 0)
3487ccd5a2cSjsg 		return -EINVAL;
3497ccd5a2cSjsg 
3507ccd5a2cSjsg #ifdef __linux__
3517ccd5a2cSjsg 	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
3527ccd5a2cSjsg 	if (rdev->doorbell.ptr == NULL) {
3537ccd5a2cSjsg 		return -ENOMEM;
3547ccd5a2cSjsg 	}
3557ccd5a2cSjsg #endif
3567ccd5a2cSjsg 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
3577ccd5a2cSjsg 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
3587ccd5a2cSjsg 
3597ccd5a2cSjsg 	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
3607ccd5a2cSjsg 
3617ccd5a2cSjsg 	return 0;
3627ccd5a2cSjsg }
3637ccd5a2cSjsg 
3647ccd5a2cSjsg /**
3657ccd5a2cSjsg  * radeon_doorbell_fini - Tear down doorbell driver information.
3667ccd5a2cSjsg  *
3677ccd5a2cSjsg  * @rdev: radeon_device pointer
3687ccd5a2cSjsg  *
3697ccd5a2cSjsg  * Tear down doorbell driver information (CIK)
3707ccd5a2cSjsg  */
3717ccd5a2cSjsg static void radeon_doorbell_fini(struct radeon_device *rdev)
3727ccd5a2cSjsg {
3737ccd5a2cSjsg #ifdef __linux__
3747ccd5a2cSjsg 	iounmap(rdev->doorbell.ptr);
3757ccd5a2cSjsg #else
3767ccd5a2cSjsg 	if (rdev->doorbell.size > 0)
3777ccd5a2cSjsg 		bus_space_unmap(rdev->memt, rdev->doorbell.bsh,
3787ccd5a2cSjsg 		    rdev->doorbell.size);
3797ccd5a2cSjsg 	rdev->doorbell.size = 0;
3807ccd5a2cSjsg #endif
381d6bc221bSkettenis 	rdev->doorbell.ptr = NULL;
3827ccd5a2cSjsg }
3837ccd5a2cSjsg 
3847ccd5a2cSjsg /**
3857ccd5a2cSjsg  * radeon_doorbell_get - Allocate a doorbell entry
3867ccd5a2cSjsg  *
3877ccd5a2cSjsg  * @rdev: radeon_device pointer
3887ccd5a2cSjsg  * @doorbell: doorbell index
3897ccd5a2cSjsg  *
3907ccd5a2cSjsg  * Allocate a doorbell for use by the driver (all asics).
3917ccd5a2cSjsg  * Returns 0 on success or -EINVAL on failure.
3927ccd5a2cSjsg  */
3937ccd5a2cSjsg int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
3947ccd5a2cSjsg {
3957ccd5a2cSjsg 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
3967ccd5a2cSjsg 	if (offset < rdev->doorbell.num_doorbells) {
3977ccd5a2cSjsg 		__set_bit(offset, rdev->doorbell.used);
3987ccd5a2cSjsg 		*doorbell = offset;
3997ccd5a2cSjsg 		return 0;
4007ccd5a2cSjsg 	} else {
4017ccd5a2cSjsg 		return -EINVAL;
4027ccd5a2cSjsg 	}
4037ccd5a2cSjsg }
4047ccd5a2cSjsg 
4057ccd5a2cSjsg /**
4067ccd5a2cSjsg  * radeon_doorbell_free - Free a doorbell entry
4077ccd5a2cSjsg  *
4087ccd5a2cSjsg  * @rdev: radeon_device pointer
4097ccd5a2cSjsg  * @doorbell: doorbell index
4107ccd5a2cSjsg  *
4117ccd5a2cSjsg  * Free a doorbell allocated for use by the driver (all asics)
4127ccd5a2cSjsg  */
4137ccd5a2cSjsg void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
4147ccd5a2cSjsg {
4157ccd5a2cSjsg 	if (doorbell < rdev->doorbell.num_doorbells)
4167ccd5a2cSjsg 		__clear_bit(doorbell, rdev->doorbell.used);
4177ccd5a2cSjsg }
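/*
 * Usage sketch, mirroring the scratch register helpers above: the caller gets
 * a free doorbell index out of the used bitmap and must return it when done.
 *
 *	u32 doorbell_index;
 *	if (radeon_doorbell_get(rdev, &doorbell_index) == 0) {
 *		// ... program the ring/queue with doorbell_index ...
 *		radeon_doorbell_free(rdev, doorbell_index);
 *	}
 */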
4187ccd5a2cSjsg 
4197ccd5a2cSjsg /*
4201099013bSjsg  * radeon_wb_*()
4215ca02815Sjsg  * Writeback is the method by which the GPU updates special pages
4221099013bSjsg  * in memory with the status of certain GPU events (fences, ring pointers,
4231099013bSjsg  * etc.).
4241099013bSjsg  */
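/*
 * For example, when writeback is enabled a ring's read pointer can be fetched
 * from the writeback page (rdev->wb.wb[ring->rptr_offs/4]) instead of doing an
 * MMIO register read; the ring and fence code are the main consumers.
 */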
4251099013bSjsg 
4261099013bSjsg /**
4271099013bSjsg  * radeon_wb_disable - Disable Writeback
4281099013bSjsg  *
4291099013bSjsg  * @rdev: radeon_device pointer
4301099013bSjsg  *
4311099013bSjsg  * Disables Writeback (all asics).  Used for suspend.
4321099013bSjsg  */
4331099013bSjsg void radeon_wb_disable(struct radeon_device *rdev)
4341099013bSjsg {
4351099013bSjsg 	rdev->wb.enabled = false;
4361099013bSjsg }
4371099013bSjsg 
4381099013bSjsg /**
4391099013bSjsg  * radeon_wb_fini - Disable Writeback and free memory
4401099013bSjsg  *
4411099013bSjsg  * @rdev: radeon_device pointer
4421099013bSjsg  *
4431099013bSjsg  * Disables Writeback and frees the Writeback memory (all asics).
4441099013bSjsg  * Used at driver shutdown.
4451099013bSjsg  */
4461099013bSjsg void radeon_wb_fini(struct radeon_device *rdev)
4471099013bSjsg {
4481099013bSjsg 	radeon_wb_disable(rdev);
4491099013bSjsg 	if (rdev->wb.wb_obj) {
4507ccd5a2cSjsg 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
4517ccd5a2cSjsg 			radeon_bo_kunmap(rdev->wb.wb_obj);
4527ccd5a2cSjsg 			radeon_bo_unpin(rdev->wb.wb_obj);
4537ccd5a2cSjsg 			radeon_bo_unreserve(rdev->wb.wb_obj);
4547ccd5a2cSjsg 		}
4551099013bSjsg 		radeon_bo_unref(&rdev->wb.wb_obj);
4561099013bSjsg 		rdev->wb.wb = NULL;
4571099013bSjsg 		rdev->wb.wb_obj = NULL;
4581099013bSjsg 	}
4591099013bSjsg }
4601099013bSjsg 
4611099013bSjsg /**
4621099013bSjsg  * radeon_wb_init - Init Writeback driver info and allocate memory
4631099013bSjsg  *
4641099013bSjsg  * @rdev: radeon_device pointer
4651099013bSjsg  *
4661099013bSjsg  * Initializes writeback and allocates the writeback memory (all asics).
4671099013bSjsg  * Used at driver startup.
4681099013bSjsg  * Returns 0 on success or a negative error code on failure.
4691099013bSjsg  */
4701099013bSjsg int radeon_wb_init(struct radeon_device *rdev)
4711099013bSjsg {
4721099013bSjsg 	int r;
4731099013bSjsg 
4741099013bSjsg 	if (rdev->wb.wb_obj == NULL) {
4751099013bSjsg 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
4767ccd5a2cSjsg 				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
4777ccd5a2cSjsg 				     &rdev->wb.wb_obj);
4781099013bSjsg 		if (r) {
4791099013bSjsg 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
4801099013bSjsg 			return r;
4811099013bSjsg 		}
4821099013bSjsg 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
4831099013bSjsg 		if (unlikely(r != 0)) {
4841099013bSjsg 			radeon_wb_fini(rdev);
4851099013bSjsg 			return r;
4861099013bSjsg 		}
4871099013bSjsg 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
4881099013bSjsg 				&rdev->wb.gpu_addr);
4891099013bSjsg 		if (r) {
4901099013bSjsg 			radeon_bo_unreserve(rdev->wb.wb_obj);
4911099013bSjsg 			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
4921099013bSjsg 			radeon_wb_fini(rdev);
4931099013bSjsg 			return r;
4941099013bSjsg 		}
4951099013bSjsg 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
4961099013bSjsg 		radeon_bo_unreserve(rdev->wb.wb_obj);
4971099013bSjsg 		if (r) {
4981099013bSjsg 			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
4991099013bSjsg 			radeon_wb_fini(rdev);
5001099013bSjsg 			return r;
5011099013bSjsg 		}
5027ccd5a2cSjsg 	}
5031099013bSjsg 
5041099013bSjsg 	/* clear wb memory */
5051099013bSjsg 	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
5061099013bSjsg 	/* disable event_write fences */
5071099013bSjsg 	rdev->wb.use_event = false;
5081099013bSjsg 	/* disabled via module param */
5091099013bSjsg 	if (radeon_no_wb == 1) {
5101099013bSjsg 		rdev->wb.enabled = false;
5111099013bSjsg 	} else {
5121099013bSjsg 		if (rdev->flags & RADEON_IS_AGP) {
5131099013bSjsg 			/* often unreliable on AGP */
5141099013bSjsg 			rdev->wb.enabled = false;
5151099013bSjsg 		} else if (rdev->family < CHIP_R300) {
5161099013bSjsg 			/* often unreliable on pre-r300 */
5171099013bSjsg 			rdev->wb.enabled = false;
5181099013bSjsg 		} else {
5191099013bSjsg 			rdev->wb.enabled = true;
5201099013bSjsg 			/* event_write fences are only available on r600+ */
5211099013bSjsg 			if (rdev->family >= CHIP_R600) {
5221099013bSjsg 				rdev->wb.use_event = true;
5231099013bSjsg 			}
5241099013bSjsg 		}
5251099013bSjsg 	}
5261099013bSjsg 	/* always use writeback/events on NI, APUs */
5271099013bSjsg 	if (rdev->family >= CHIP_PALM) {
5281099013bSjsg 		rdev->wb.enabled = true;
5291099013bSjsg 		rdev->wb.use_event = true;
5301099013bSjsg 	}
5311099013bSjsg 
5321099013bSjsg 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
5331099013bSjsg 
5341099013bSjsg 	return 0;
5351099013bSjsg }
5361099013bSjsg 
5371099013bSjsg /**
5381099013bSjsg  * radeon_vram_location - try to find VRAM location
5391099013bSjsg  * @rdev: radeon device structure holding all necessary information
5401099013bSjsg  * @mc: memory controller structure holding memory information
5411099013bSjsg  * @base: base address at which to put VRAM
5421099013bSjsg  *
5431099013bSjsg  * Function will try to place VRAM at the base address provided
5441099013bSjsg  * as a parameter (which is so far either the PCI aperture address or,
5451099013bSjsg  * for IGP, the TOM base address).
5461099013bSjsg  *
5471099013bSjsg  * If there is not enough space to fit the invisible VRAM in the 32-bit
5481099013bSjsg  * address space then we limit the VRAM size to the aperture.
5491099013bSjsg  *
5501099013bSjsg  * If we are using AGP and if the AGP aperture doesn't allow us to have
5511099013bSjsg  * room for all the VRAM then we restrict the VRAM to the PCI aperture
5521099013bSjsg  * size and print a warning.
5531099013bSjsg  *
5541099013bSjsg  * This function never fails; the worst case is limiting the VRAM size.
5551099013bSjsg  *
5561099013bSjsg  * Note: GTT start, end, size should be initialized before calling this
5571099013bSjsg  * function on AGP platforms.
5581099013bSjsg  *
5595ca02815Sjsg  * Note 1: We don't explicitly enforce VRAM start to be aligned on VRAM size,
5601099013bSjsg  * this shouldn't be a problem as we are using the PCI aperture as a reference.
5611099013bSjsg  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
5621099013bSjsg  * not IGP.
5631099013bSjsg  *
5645ca02815Sjsg  * Note 2: we use mc_vram_size as on some boards we need to program the mc to
5651099013bSjsg  * cover the whole aperture even if VRAM size is smaller than the aperture size
5661099013bSjsg  * (Novell bug 204882, along with lots of Ubuntu ones)
5671099013bSjsg  *
5685ca02815Sjsg  * Note 3: when limiting vram it's safe to overwrite real_vram_size because
5691099013bSjsg  * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
5701099013bSjsg  * not affected by the bogus hw of Novell bug 204882, along with lots of Ubuntu
5711099013bSjsg  * ones)
5721099013bSjsg  *
5735ca02815Sjsg  * Note 4: IGP TOM addr should be the same as the aperture addr, we don't
5741099013bSjsg  * explicitly check for that though.
5751099013bSjsg  *
5761099013bSjsg  * FIXME: when reducing VRAM size align new size on power of 2.
5771099013bSjsg  */
5781099013bSjsg void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
5791099013bSjsg {
5801099013bSjsg 	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
5811099013bSjsg 
5821099013bSjsg 	mc->vram_start = base;
5837ccd5a2cSjsg 	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
5841099013bSjsg 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
5851099013bSjsg 		mc->real_vram_size = mc->aper_size;
5861099013bSjsg 		mc->mc_vram_size = mc->aper_size;
5871099013bSjsg 	}
5881099013bSjsg 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
5891099013bSjsg 	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
5901099013bSjsg 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
5911099013bSjsg 		mc->real_vram_size = mc->aper_size;
5921099013bSjsg 		mc->mc_vram_size = mc->aper_size;
5931099013bSjsg 	}
5941099013bSjsg 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
5951099013bSjsg 	if (limit && limit < mc->real_vram_size)
5961099013bSjsg 		mc->real_vram_size = limit;
5971099013bSjsg 	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
5981099013bSjsg 			mc->mc_vram_size >> 20, mc->vram_start,
5991099013bSjsg 			mc->vram_end, mc->real_vram_size >> 20);
6001099013bSjsg }
6011099013bSjsg 
6021099013bSjsg /**
6031099013bSjsg  * radeon_gtt_location - try to find GTT location
6041099013bSjsg  * @rdev: radeon device structure holding all necessary information
6051099013bSjsg  * @mc: memory controller structure holding memory information
6061099013bSjsg  *
6071099013bSjsg  * Function will try to place the GTT before or after VRAM.
6081099013bSjsg  *
6091099013bSjsg  * If the GTT size is bigger than the space left then we adjust the GTT size.
6101099013bSjsg  * Thus this function never fails.
6111099013bSjsg  *
6121099013bSjsg  * FIXME: when reducing GTT size align new size on power of 2.
6131099013bSjsg  */
6141099013bSjsg void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
6151099013bSjsg {
6161099013bSjsg 	u64 size_af, size_bf;
6171099013bSjsg 
6187ccd5a2cSjsg 	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
6191099013bSjsg 	size_bf = mc->vram_start & ~mc->gtt_base_align;
6201099013bSjsg 	if (size_bf > size_af) {
6211099013bSjsg 		if (mc->gtt_size > size_bf) {
6221099013bSjsg 			dev_warn(rdev->dev, "limiting GTT\n");
6231099013bSjsg 			mc->gtt_size = size_bf;
6241099013bSjsg 		}
6251099013bSjsg 		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
6261099013bSjsg 	} else {
6271099013bSjsg 		if (mc->gtt_size > size_af) {
6281099013bSjsg 			dev_warn(rdev->dev, "limiting GTT\n");
6291099013bSjsg 			mc->gtt_size = size_af;
6301099013bSjsg 		}
6311099013bSjsg 		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
6321099013bSjsg 	}
6331099013bSjsg 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
6341099013bSjsg 	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6351099013bSjsg 			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
6361099013bSjsg }
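/*
 * Worked example (assuming gtt_base_align == 0 and a 32-bit mc_mask): with VRAM
 * at 0x00000000 - 0x0FFFFFFF, size_bf is 0 and size_af is about 3.75G, so the
 * GTT is placed right after VRAM at 0x10000000.
 */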
6371099013bSjsg 
6381099013bSjsg /*
6391099013bSjsg  * GPU helper functions.
6401099013bSjsg  */
6417ccd5a2cSjsg 
6425ca02815Sjsg /*
6437ccd5a2cSjsg  * radeon_device_is_virtual - check if we are running in a virtual environment
6447ccd5a2cSjsg  *
6457ccd5a2cSjsg  * Check if the asic has been passed through to a VM (all asics).
6467ccd5a2cSjsg  * Used at driver startup.
6477ccd5a2cSjsg  * Returns true if virtual or false if not.
6487ccd5a2cSjsg  */
6497f4dd379Sjsg bool radeon_device_is_virtual(void)
6507ccd5a2cSjsg {
65116fe02eaSjsg #ifdef CONFIG_X86
652f5f8653dSjsg 	return (cpu_ecxfeature & CPUIDECX_HV);
6537ccd5a2cSjsg #else
6547ccd5a2cSjsg 	return false;
6557ccd5a2cSjsg #endif
6567ccd5a2cSjsg }
6577ccd5a2cSjsg 
6581099013bSjsg /**
6591099013bSjsg  * radeon_card_posted - check if the hw has already been initialized
6601099013bSjsg  *
6611099013bSjsg  * @rdev: radeon_device pointer
6621099013bSjsg  *
6631099013bSjsg  * Check if the asic has been initialized (all asics).
6641099013bSjsg  * Used at driver startup.
6651099013bSjsg  * Returns true if initialized or false if not.
6661099013bSjsg  */
6671099013bSjsg bool radeon_card_posted(struct radeon_device *rdev)
6681099013bSjsg {
6691099013bSjsg 	uint32_t reg;
6701099013bSjsg 
6717ccd5a2cSjsg 	/* for pass through, always force asic_init for CI */
6727ccd5a2cSjsg 	if (rdev->family >= CHIP_BONAIRE &&
6737ccd5a2cSjsg 	    radeon_device_is_virtual())
6747ccd5a2cSjsg 		return false;
6757ccd5a2cSjsg 
6767ccd5a2cSjsg 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
6771099013bSjsg #ifdef notyet
6781099013bSjsg 	if (efi_enabled(EFI_BOOT) &&
6797ccd5a2cSjsg 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
6807ccd5a2cSjsg 	    (rdev->family < CHIP_R600))
6811099013bSjsg 		return false;
6821099013bSjsg #endif
6831099013bSjsg 
6847ccd5a2cSjsg 	if (ASIC_IS_NODCE(rdev))
6857ccd5a2cSjsg 		goto check_memsize;
6867ccd5a2cSjsg 
6871099013bSjsg 	/* first check CRTCs */
688ef7f6af3Sjsg 	if (ASIC_IS_DCE4(rdev)) {
6891099013bSjsg 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
6901099013bSjsg 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
691ef7f6af3Sjsg 		if (rdev->num_crtc >= 4) {
692ef7f6af3Sjsg 			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
693ef7f6af3Sjsg 				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
694ef7f6af3Sjsg 		}
695ef7f6af3Sjsg 		if (rdev->num_crtc >= 6) {
696ef7f6af3Sjsg 			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
6971099013bSjsg 				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
698ef7f6af3Sjsg 		}
6991099013bSjsg 		if (reg & EVERGREEN_CRTC_MASTER_EN)
7001099013bSjsg 			return true;
7011099013bSjsg 	} else if (ASIC_IS_AVIVO(rdev)) {
7021099013bSjsg 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
7031099013bSjsg 		      RREG32(AVIVO_D2CRTC_CONTROL);
7041099013bSjsg 		if (reg & AVIVO_CRTC_EN) {
7051099013bSjsg 			return true;
7061099013bSjsg 		}
7071099013bSjsg 	} else {
7081099013bSjsg 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
7091099013bSjsg 		      RREG32(RADEON_CRTC2_GEN_CNTL);
7101099013bSjsg 		if (reg & RADEON_CRTC_EN) {
7111099013bSjsg 			return true;
7121099013bSjsg 		}
7131099013bSjsg 	}
7141099013bSjsg 
7157ccd5a2cSjsg check_memsize:
7161099013bSjsg 	/* then check MEM_SIZE, in case the crtcs are off */
7171099013bSjsg 	if (rdev->family >= CHIP_R600)
7181099013bSjsg 		reg = RREG32(R600_CONFIG_MEMSIZE);
7191099013bSjsg 	else
7201099013bSjsg 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
7211099013bSjsg 
7221099013bSjsg 	if (reg)
7231099013bSjsg 		return true;
7241099013bSjsg 
7251099013bSjsg 	return false;
7261099013bSjsg 
7271099013bSjsg }
7281099013bSjsg 
7291099013bSjsg /**
7301099013bSjsg  * radeon_update_bandwidth_info - update display bandwidth params
7311099013bSjsg  *
7321099013bSjsg  * @rdev: radeon_device pointer
7331099013bSjsg  *
7341099013bSjsg  * Used when sclk/mclk are switched or display modes are set.
7351099013bSjsg  * params are used to calculate display watermarks (all asics)
7361099013bSjsg  */
7371099013bSjsg void radeon_update_bandwidth_info(struct radeon_device *rdev)
7381099013bSjsg {
7391099013bSjsg 	fixed20_12 a;
7401099013bSjsg 	u32 sclk = rdev->pm.current_sclk;
7411099013bSjsg 	u32 mclk = rdev->pm.current_mclk;
7421099013bSjsg 
7431099013bSjsg 	/* sclk/mclk in Mhz */
7441099013bSjsg 	a.full = dfixed_const(100);
7451099013bSjsg 	rdev->pm.sclk.full = dfixed_const(sclk);
7461099013bSjsg 	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
7471099013bSjsg 	rdev->pm.mclk.full = dfixed_const(mclk);
7481099013bSjsg 	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
7491099013bSjsg 
7501099013bSjsg 	if (rdev->flags & RADEON_IS_IGP) {
7511099013bSjsg 		a.full = dfixed_const(16);
7521099013bSjsg 		/* core_bandwidth = sclk(Mhz) * 16 */
7531099013bSjsg 		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
7541099013bSjsg 	}
7551099013bSjsg }
7561099013bSjsg 
7571099013bSjsg /**
7581099013bSjsg  * radeon_boot_test_post_card - check and possibly initialize the hw
7591099013bSjsg  *
7601099013bSjsg  * @rdev: radeon_device pointer
7611099013bSjsg  *
7621099013bSjsg  * Check if the asic is initialized and if not, attempt to initialize
7631099013bSjsg  * it (all asics).
7641099013bSjsg  * Returns true if initialized or false if not.
7651099013bSjsg  */
7661099013bSjsg bool radeon_boot_test_post_card(struct radeon_device *rdev)
7671099013bSjsg {
7681099013bSjsg 	if (radeon_card_posted(rdev))
7691099013bSjsg 		return true;
7701099013bSjsg 
7711099013bSjsg 	if (rdev->bios) {
7721099013bSjsg 		DRM_INFO("GPU not posted. posting now...\n");
7731099013bSjsg 		if (rdev->is_atom_bios)
7741099013bSjsg 			atom_asic_init(rdev->mode_info.atom_context);
7751099013bSjsg 		else
776*33a3edb1Sjsg 			radeon_combios_asic_init(rdev_to_drm(rdev));
7771099013bSjsg 		return true;
7781099013bSjsg 	} else {
7791099013bSjsg 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
7801099013bSjsg 		return false;
7811099013bSjsg 	}
7821099013bSjsg }
7831099013bSjsg 
7841099013bSjsg /**
7851099013bSjsg  * radeon_dummy_page_init - init dummy page used by the driver
7861099013bSjsg  *
7871099013bSjsg  * @rdev: radeon_device pointer
7881099013bSjsg  *
7891099013bSjsg  * Allocate the dummy page used by the driver (all asics).
7901099013bSjsg  * This dummy page is used by the driver as a filler for gart entries
7911099013bSjsg  * when pages are taken out of the GART.
7921099013bSjsg  * Returns 0 on success, -ENOMEM on failure.
7931099013bSjsg  */
7941099013bSjsg int radeon_dummy_page_init(struct radeon_device *rdev)
7951099013bSjsg {
7961099013bSjsg 	if (rdev->dummy_page.dmah)
7971099013bSjsg 		return 0;
7981099013bSjsg 	rdev->dummy_page.dmah = drm_dmamem_alloc(rdev->dmat, PAGE_SIZE, PAGE_SIZE, 1,
7991099013bSjsg 	    PAGE_SIZE, 0, BUS_DMA_WAITOK);
8001099013bSjsg 	if (!rdev->dummy_page.dmah)
8011099013bSjsg 		return -ENOMEM;
8021099013bSjsg 	rdev->dummy_page.addr = (bus_addr_t)rdev->dummy_page.dmah->map->dm_segs[0].ds_addr;
8037ccd5a2cSjsg 	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
8047ccd5a2cSjsg 							    RADEON_GART_PAGE_DUMMY);
8051099013bSjsg 	return 0;
8061099013bSjsg }
8071099013bSjsg 
8081099013bSjsg /**
8091099013bSjsg  * radeon_dummy_page_fini - free dummy page used by the driver
8101099013bSjsg  *
8111099013bSjsg  * @rdev: radeon_device pointer
8121099013bSjsg  *
8131099013bSjsg  * Frees the dummy page used by the driver (all asics).
8141099013bSjsg  */
8151099013bSjsg void radeon_dummy_page_fini(struct radeon_device *rdev)
8161099013bSjsg {
8171099013bSjsg 	if (rdev->dummy_page.dmah == NULL)
8181099013bSjsg 		return;
8191099013bSjsg 
8201099013bSjsg 	drm_dmamem_free(rdev->dmat, rdev->dummy_page.dmah);
8211099013bSjsg 	rdev->dummy_page.dmah = NULL;
8221099013bSjsg 	rdev->dummy_page.addr = 0;
8231099013bSjsg }
8241099013bSjsg 
8251099013bSjsg 
8261099013bSjsg /* ATOM accessor methods */
8271099013bSjsg /*
8281099013bSjsg  * ATOM is an interpreted byte code stored in tables in the vbios.  The
8291099013bSjsg  * driver registers callbacks to access registers and the interpreter
8301099013bSjsg  * in the driver parses the tables and executes them to program specific
8311099013bSjsg  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
8321099013bSjsg  * atombios.h, and atom.c
8331099013bSjsg  */
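/*
 * The cail_*() helpers below are the register access callbacks that
 * radeon_atombios_init() hands to the interpreter through struct card_info.
 */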
8341099013bSjsg 
8351099013bSjsg /**
8361099013bSjsg  * cail_pll_read - read PLL register
8371099013bSjsg  *
8381099013bSjsg  * @info: atom card_info pointer
8391099013bSjsg  * @reg: PLL register offset
8401099013bSjsg  *
8411099013bSjsg  * Provides a PLL register accessor for the atom interpreter (r4xx+).
8421099013bSjsg  * Returns the value of the PLL register.
8431099013bSjsg  */
8444b6e5ceaSjsg static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
8451099013bSjsg {
8461099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
8471099013bSjsg 	uint32_t r;
8481099013bSjsg 
8491099013bSjsg 	r = rdev->pll_rreg(rdev, reg);
8501099013bSjsg 	return r;
8511099013bSjsg }
8521099013bSjsg 
8531099013bSjsg /**
8541099013bSjsg  * cail_pll_write - write PLL register
8551099013bSjsg  *
8561099013bSjsg  * @info: atom card_info pointer
8571099013bSjsg  * @reg: PLL register offset
8581099013bSjsg  * @val: value to write to the PLL register
8591099013bSjsg  *
8601099013bSjsg  * Provides a PLL register accessor for the atom interpreter (r4xx+).
8611099013bSjsg  */
8624b6e5ceaSjsg static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
8631099013bSjsg {
8641099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
8651099013bSjsg 
8661099013bSjsg 	rdev->pll_wreg(rdev, reg, val);
8671099013bSjsg }
8681099013bSjsg 
8691099013bSjsg /**
8701099013bSjsg  * cail_mc_read - read MC (Memory Controller) register
8711099013bSjsg  *
8721099013bSjsg  * @info: atom card_info pointer
8731099013bSjsg  * @reg: MC register offset
8741099013bSjsg  *
8751099013bSjsg  * Provides an MC register accessor for the atom interpreter (r4xx+).
8761099013bSjsg  * Returns the value of the MC register.
8771099013bSjsg  */
8784b6e5ceaSjsg static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
8791099013bSjsg {
8801099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
8811099013bSjsg 	uint32_t r;
8821099013bSjsg 
8831099013bSjsg 	r = rdev->mc_rreg(rdev, reg);
8841099013bSjsg 	return r;
8851099013bSjsg }
8861099013bSjsg 
8871099013bSjsg /**
8881099013bSjsg  * cail_mc_write - write MC (Memory Controller) register
8891099013bSjsg  *
8901099013bSjsg  * @info: atom card_info pointer
8911099013bSjsg  * @reg: MC register offset
8921099013bSjsg  * @val: value to write to the MC register
8931099013bSjsg  *
8941099013bSjsg  * Provides an MC register accessor for the atom interpreter (r4xx+).
8951099013bSjsg  */
8964b6e5ceaSjsg static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
8971099013bSjsg {
8981099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
8991099013bSjsg 
9001099013bSjsg 	rdev->mc_wreg(rdev, reg, val);
9011099013bSjsg }
9021099013bSjsg 
9031099013bSjsg /**
9041099013bSjsg  * cail_reg_write - write MMIO register
9051099013bSjsg  *
9061099013bSjsg  * @info: atom card_info pointer
9071099013bSjsg  * @reg: MMIO register offset
9081099013bSjsg  * @val: value to write to the MMIO register
9091099013bSjsg  *
9101099013bSjsg  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
9111099013bSjsg  */
9124b6e5ceaSjsg static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
9131099013bSjsg {
9141099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
9151099013bSjsg 
9161099013bSjsg 	WREG32(reg*4, val);
9171099013bSjsg }
9181099013bSjsg 
9191099013bSjsg /**
9201099013bSjsg  * cail_reg_read - read MMIO register
9211099013bSjsg  *
9221099013bSjsg  * @info: atom card_info pointer
9231099013bSjsg  * @reg: MMIO register offset
9241099013bSjsg  *
9251099013bSjsg  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
9261099013bSjsg  * Returns the value of the MMIO register.
9271099013bSjsg  */
9284b6e5ceaSjsg static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
9291099013bSjsg {
9301099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
9311099013bSjsg 	uint32_t r;
9321099013bSjsg 
9331099013bSjsg 	r = RREG32(reg*4);
9341099013bSjsg 	return r;
9351099013bSjsg }
9361099013bSjsg 
9371099013bSjsg /**
9381099013bSjsg  * cail_ioreg_write - write IO register
9391099013bSjsg  *
9401099013bSjsg  * @info: atom card_info pointer
9411099013bSjsg  * @reg: IO register offset
9421099013bSjsg  * @val: value to write to the IO register
9431099013bSjsg  *
9441099013bSjsg  * Provides an IO register accessor for the atom interpreter (r4xx+).
9451099013bSjsg  */
9464b6e5ceaSjsg static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
9471099013bSjsg {
9481099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
9491099013bSjsg 
9501099013bSjsg 	WREG32_IO(reg*4, val);
9511099013bSjsg }
9521099013bSjsg 
9531099013bSjsg /**
9541099013bSjsg  * cail_ioreg_read - read IO register
9551099013bSjsg  *
9561099013bSjsg  * @info: atom card_info pointer
9571099013bSjsg  * @reg: IO register offset
9581099013bSjsg  *
9591099013bSjsg  * Provides an IO register accessor for the atom interpreter (r4xx+).
9601099013bSjsg  * Returns the value of the IO register.
9611099013bSjsg  */
9624b6e5ceaSjsg static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
9631099013bSjsg {
9641099013bSjsg 	struct radeon_device *rdev = info->dev->dev_private;
9651099013bSjsg 	uint32_t r;
9661099013bSjsg 
9671099013bSjsg 	r = RREG32_IO(reg*4);
9681099013bSjsg 	return r;
9691099013bSjsg }
9701099013bSjsg 
9711099013bSjsg /**
9721099013bSjsg  * radeon_atombios_init - init the driver info and callbacks for atombios
9731099013bSjsg  *
9741099013bSjsg  * @rdev: radeon_device pointer
9751099013bSjsg  *
9761099013bSjsg  * Initializes the driver info and register access callbacks for the
9771099013bSjsg  * ATOM interpreter (r4xx+).
9781099013bSjsg  * Returns 0 on success, -ENOMEM on failure.
9791099013bSjsg  * Called at driver startup.
9801099013bSjsg  */
9811099013bSjsg int radeon_atombios_init(struct radeon_device *rdev)
9821099013bSjsg {
9831099013bSjsg 	struct card_info *atom_card_info =
984de5631a0Sjsg 	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
9851099013bSjsg 
9861099013bSjsg 	if (!atom_card_info)
9871099013bSjsg 		return -ENOMEM;
9881099013bSjsg 
9891099013bSjsg 	rdev->mode_info.atom_card_info = atom_card_info;
990*33a3edb1Sjsg 	atom_card_info->dev = rdev_to_drm(rdev);
9911099013bSjsg 	atom_card_info->reg_read = cail_reg_read;
9921099013bSjsg 	atom_card_info->reg_write = cail_reg_write;
9931099013bSjsg 	/* needed for iio ops */
9941099013bSjsg 	if (rdev->rio_mem_size > 0) {
9951099013bSjsg 		atom_card_info->ioreg_read = cail_ioreg_read;
9961099013bSjsg 		atom_card_info->ioreg_write = cail_ioreg_write;
9971099013bSjsg 	} else {
99827491653Skettenis #ifndef __powerpc64__
9991099013bSjsg 		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
100027491653Skettenis #endif
10011099013bSjsg 		atom_card_info->ioreg_read = cail_reg_read;
10021099013bSjsg 		atom_card_info->ioreg_write = cail_reg_write;
10031099013bSjsg 	}
10041099013bSjsg 	atom_card_info->mc_read = cail_mc_read;
10051099013bSjsg 	atom_card_info->mc_write = cail_mc_write;
10061099013bSjsg 	atom_card_info->pll_read = cail_pll_read;
10071099013bSjsg 	atom_card_info->pll_write = cail_pll_write;
10081099013bSjsg 
10091099013bSjsg 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
10107ccd5a2cSjsg 	if (!rdev->mode_info.atom_context) {
10117ccd5a2cSjsg 		radeon_atombios_fini(rdev);
10127ccd5a2cSjsg 		return -ENOMEM;
10137ccd5a2cSjsg 	}
10147ccd5a2cSjsg 
1015528273cbSjsg 	rw_init(&rdev->mode_info.atom_context->mutex, "atomcon");
10167ccd5a2cSjsg 	rw_init(&rdev->mode_info.atom_context->scratch_mutex, "atomscr");
1017*33a3edb1Sjsg 	radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev));
10181099013bSjsg 	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
10191099013bSjsg 	return 0;
10201099013bSjsg }
10211099013bSjsg 
10221099013bSjsg /**
10231099013bSjsg  * radeon_atombios_fini - free the driver info and callbacks for atombios
10241099013bSjsg  *
10251099013bSjsg  * @rdev: radeon_device pointer
10261099013bSjsg  *
10271099013bSjsg  * Frees the driver info and register access callbacks for the ATOM
10281099013bSjsg  * interpreter (r4xx+).
10291099013bSjsg  * Called at driver shutdown.
10301099013bSjsg  */
10311099013bSjsg void radeon_atombios_fini(struct radeon_device *rdev)
10321099013bSjsg {
10331099013bSjsg 	if (rdev->mode_info.atom_context) {
1034de5631a0Sjsg 		kfree(rdev->mode_info.atom_context->scratch);
103544428c59Sjsg 		kfree(rdev->mode_info.atom_context->iio);
1036f3eef2b6Sderaadt 	}
10377ccd5a2cSjsg 	kfree(rdev->mode_info.atom_context);
10387ccd5a2cSjsg 	rdev->mode_info.atom_context = NULL;
1039de5631a0Sjsg 	kfree(rdev->mode_info.atom_card_info);
10407ccd5a2cSjsg 	rdev->mode_info.atom_card_info = NULL;
10411099013bSjsg }
10421099013bSjsg 
10431099013bSjsg /* COMBIOS */
10441099013bSjsg /*
10451099013bSjsg  * COMBIOS is the bios format prior to ATOM. It provides
10461099013bSjsg  * command tables similar to ATOM, but doesn't have a unified
10471099013bSjsg  * parser.  See radeon_combios.c
10481099013bSjsg  */
10491099013bSjsg 
10501099013bSjsg /**
10511099013bSjsg  * radeon_combios_init - init the driver info for combios
10521099013bSjsg  *
10531099013bSjsg  * @rdev: radeon_device pointer
10541099013bSjsg  *
10551099013bSjsg  * Initializes the driver info for combios (r1xx-r3xx).
10561099013bSjsg  * Returns 0 on success.
10571099013bSjsg  * Called at driver startup.
10581099013bSjsg  */
10591099013bSjsg int radeon_combios_init(struct radeon_device *rdev)
10601099013bSjsg {
1061*33a3edb1Sjsg 	radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev));
10621099013bSjsg 	return 0;
10631099013bSjsg }
10641099013bSjsg 
10651099013bSjsg /**
10661099013bSjsg  * radeon_combios_fini - free the driver info for combios
10671099013bSjsg  *
10681099013bSjsg  * @rdev: radeon_device pointer
10691099013bSjsg  *
10701099013bSjsg  * Frees the driver info for combios (r1xx-r3xx).
10711099013bSjsg  * Called at driver shutdown.
10721099013bSjsg  */
10731099013bSjsg void radeon_combios_fini(struct radeon_device *rdev)
10741099013bSjsg {
10751099013bSjsg }
10761099013bSjsg 
10771099013bSjsg /* if we get transitioned to only one device, take VGA back */
10781099013bSjsg /**
10791099013bSjsg  * radeon_vga_set_decode - enable/disable vga decode
10801099013bSjsg  *
10815ca02815Sjsg  * @pdev: PCI device
10821099013bSjsg  * @state: enable/disable vga decode
10831099013bSjsg  *
10841099013bSjsg  * Enable/disable vga decode (all asics).
10851099013bSjsg  * Returns VGA resource flags.
10861099013bSjsg  */
10875ca02815Sjsg static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
10881099013bSjsg {
1089c349dbc7Sjsg 	STUB();
1090c349dbc7Sjsg 	return -ENOSYS;
1091c349dbc7Sjsg #ifdef notyet
10925ca02815Sjsg 	struct drm_device *dev = pci_get_drvdata(pdev);
10935ca02815Sjsg 	struct radeon_device *rdev = dev->dev_private;
10941099013bSjsg 	radeon_vga_set_state(rdev, state);
10951099013bSjsg 	if (state)
10961099013bSjsg 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
10971099013bSjsg 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
10981099013bSjsg 	else
10991099013bSjsg 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
11001099013bSjsg #endif
1101c349dbc7Sjsg }
11021099013bSjsg 
11031099013bSjsg /**
11045ca02815Sjsg  * radeon_gart_size_auto - Determine a sensible default GART size
11055ca02815Sjsg  *                         according to ASIC family.
11067ccd5a2cSjsg  *
11075ca02815Sjsg  * @family: ASIC family name
11087ccd5a2cSjsg  */
11097ccd5a2cSjsg static int radeon_gart_size_auto(enum radeon_family family)
11107ccd5a2cSjsg {
11117ccd5a2cSjsg 	/* default to a larger gart size on newer asics */
11127ccd5a2cSjsg 	if (family >= CHIP_TAHITI)
11137ccd5a2cSjsg 		return 2048;
11147ccd5a2cSjsg 	else if (family >= CHIP_RV770)
11157ccd5a2cSjsg 		return 1024;
11167ccd5a2cSjsg 	else
11177ccd5a2cSjsg 		return 512;
11187ccd5a2cSjsg }
11197ccd5a2cSjsg 
11207ccd5a2cSjsg /**
11211099013bSjsg  * radeon_check_arguments - validate module params
11221099013bSjsg  *
11231099013bSjsg  * @rdev: radeon_device pointer
11241099013bSjsg  *
11251099013bSjsg  * Validates certain module parameters and updates
11261099013bSjsg  * the associated values used by the driver (all asics).
11271099013bSjsg  */
11284b6e5ceaSjsg static void radeon_check_arguments(struct radeon_device *rdev)
11291099013bSjsg {
11301099013bSjsg 	/* vramlimit must be a power of two */
11311bb76ff1Sjsg 	if (radeon_vram_limit != 0 && !is_power_of_2(radeon_vram_limit)) {
11321099013bSjsg 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
11331099013bSjsg 				radeon_vram_limit);
11341099013bSjsg 		radeon_vram_limit = 0;
11351099013bSjsg 	}
11361099013bSjsg 
11377ccd5a2cSjsg 	if (radeon_gart_size == -1) {
11387ccd5a2cSjsg 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
11397ccd5a2cSjsg 	}
11401099013bSjsg 	/* gtt size must be a power of two and greater than or equal to 32M */
11411099013bSjsg 	if (radeon_gart_size < 32) {
11427ccd5a2cSjsg 		dev_warn(rdev->dev, "gart size (%d) too small\n",
11431099013bSjsg 				radeon_gart_size);
11447ccd5a2cSjsg 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
11451bb76ff1Sjsg 	} else if (!is_power_of_2(radeon_gart_size)) {
11461099013bSjsg 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
11471099013bSjsg 				radeon_gart_size);
11487ccd5a2cSjsg 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
11491099013bSjsg 	}
11501099013bSjsg 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
11511099013bSjsg 
11521099013bSjsg 	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
11531099013bSjsg 	switch (radeon_agpmode) {
11541099013bSjsg 	case -1:
11551099013bSjsg 	case 0:
11561099013bSjsg 	case 1:
11571099013bSjsg 	case 2:
11581099013bSjsg 	case 4:
11591099013bSjsg 	case 8:
11601099013bSjsg 		break;
11611099013bSjsg 	default:
11621099013bSjsg 		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
11631099013bSjsg 				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
11641099013bSjsg 		radeon_agpmode = 0;
11651099013bSjsg 		break;
11661099013bSjsg 	}
11677ccd5a2cSjsg 
11681bb76ff1Sjsg 	if (!is_power_of_2(radeon_vm_size)) {
11697ccd5a2cSjsg 		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
11707ccd5a2cSjsg 			 radeon_vm_size);
11717ccd5a2cSjsg 		radeon_vm_size = 4;
11721099013bSjsg 	}
11731099013bSjsg 
11747ccd5a2cSjsg 	if (radeon_vm_size < 1) {
11757f4dd379Sjsg 		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
11767ccd5a2cSjsg 			 radeon_vm_size);
11777ccd5a2cSjsg 		radeon_vm_size = 4;
11787ccd5a2cSjsg 	}
11797ccd5a2cSjsg 
11807ccd5a2cSjsg 	/*
11817ccd5a2cSjsg 	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
11821099013bSjsg 	 */
11837ccd5a2cSjsg 	if (radeon_vm_size > 1024) {
11847ccd5a2cSjsg 		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
11857ccd5a2cSjsg 			 radeon_vm_size);
11867ccd5a2cSjsg 		radeon_vm_size = 4;
11871099013bSjsg 	}
11881099013bSjsg 
11897ccd5a2cSjsg 	/* defines the number of bits in the page table versus the page directory;
11907ccd5a2cSjsg 	 * a page is 4KB so we have a 12-bit offset, a minimum of 9 bits in the
11917ccd5a2cSjsg 	 * page table, and the remaining bits are in the page directory */
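	/* Worked example: with radeon_vm_size = 8 (8GB), bits = ilog2(8) + 18 = 21;
	 * since the size is <= 8GB, radeon_vm_block_size = 21 - 9 = 12, leaving
	 * 21 - 12 = 9 bits of page directory index, i.e. a 4K page directory. */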
11927ccd5a2cSjsg 	if (radeon_vm_block_size == -1) {
11937ccd5a2cSjsg 
11947ccd5a2cSjsg 		/* Total bits covered by PD + PTs */
11957ccd5a2cSjsg 		unsigned bits = ilog2(radeon_vm_size) + 18;
11967ccd5a2cSjsg 
11977ccd5a2cSjsg 		/* Make sure the PD is 4K in size up to 8GB address space.
11987ccd5a2cSjsg 		   Above that split equal between PD and PTs */
11997ccd5a2cSjsg 		if (radeon_vm_size <= 8)
12007ccd5a2cSjsg 			radeon_vm_block_size = bits - 9;
12017ccd5a2cSjsg 		else
12027ccd5a2cSjsg 			radeon_vm_block_size = (bits + 3) / 2;
12037ccd5a2cSjsg 
12047ccd5a2cSjsg 	} else if (radeon_vm_block_size < 9) {
12057ccd5a2cSjsg 		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
12067ccd5a2cSjsg 			 radeon_vm_block_size);
12077ccd5a2cSjsg 		radeon_vm_block_size = 9;
12087ccd5a2cSjsg 	}
12097ccd5a2cSjsg 
12107ccd5a2cSjsg 	if (radeon_vm_block_size > 24 ||
12117ccd5a2cSjsg 	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
12127ccd5a2cSjsg 		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
12137ccd5a2cSjsg 			 radeon_vm_block_size);
12147ccd5a2cSjsg 		radeon_vm_block_size = 9;
12157ccd5a2cSjsg 	}
12161099013bSjsg }
12171099013bSjsg 
12181099013bSjsg /**
12191099013bSjsg  * radeon_switcheroo_set_state - set switcheroo state
12201099013bSjsg  *
12211099013bSjsg  * @pdev: pci dev pointer
12227ccd5a2cSjsg  * @state: vga_switcheroo state
12231099013bSjsg  *
1224f005ef32Sjsg  * Callback for the switcheroo driver.  Suspends or resumes
12251099013bSjsg  * the asic before or after it is powered up using ACPI methods.
12261099013bSjsg  */
12277ccd5a2cSjsg #ifdef notyet
12281099013bSjsg static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
12291099013bSjsg {
12301099013bSjsg 	struct drm_device *dev = pci_get_drvdata(pdev);
12317ccd5a2cSjsg 
12327ccd5a2cSjsg 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
12337ccd5a2cSjsg 		return;
12347ccd5a2cSjsg 
12351099013bSjsg 	if (state == VGA_SWITCHEROO_ON) {
12367f4dd379Sjsg 		pr_info("radeon: switched on\n");
12371099013bSjsg 		/* don't suspend or resume card normally */
12381099013bSjsg 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
12391099013bSjsg 
12407ccd5a2cSjsg 		radeon_resume_kms(dev, true, true);
12411099013bSjsg 
12421099013bSjsg 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
12431099013bSjsg 		drm_kms_helper_poll_enable(dev);
12441099013bSjsg 	} else {
12457f4dd379Sjsg 		pr_info("radeon: switched off\n");
12461099013bSjsg 		drm_kms_helper_poll_disable(dev);
12471099013bSjsg 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
12487f4dd379Sjsg 		radeon_suspend_kms(dev, true, true, false);
12491099013bSjsg 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
12501099013bSjsg 	}
12511099013bSjsg }
12521099013bSjsg 
12531099013bSjsg /**
12541099013bSjsg  * radeon_switcheroo_can_switch - see if switcheroo state can change
12551099013bSjsg  *
12561099013bSjsg  * @pdev: pci dev pointer
12571099013bSjsg  *
12581099013bSjsg  * Callback for the switcheroo driver.  Check if the switcheroo
12591099013bSjsg  * state can be changed.
12601099013bSjsg  * Returns true if the state can be changed, false if not.
12611099013bSjsg  */
12621099013bSjsg static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
12631099013bSjsg {
12641099013bSjsg 	struct drm_device *dev = pci_get_drvdata(pdev);
12651099013bSjsg 
12667ccd5a2cSjsg 	/*
12677ccd5a2cSjsg 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
12687ccd5a2cSjsg 	 * locking inversion with the driver load path. And the access here is
12697ccd5a2cSjsg 	 * completely racy anyway. So don't bother with locking for now.
12707ccd5a2cSjsg 	 */
1271c349dbc7Sjsg 	return atomic_read(&dev->open_count) == 0;
12721099013bSjsg }
12731099013bSjsg 
12741099013bSjsg static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
12751099013bSjsg 	.set_gpu_state = radeon_switcheroo_set_state,
12761099013bSjsg 	.reprobe = NULL,
12771099013bSjsg 	.can_switch = radeon_switcheroo_can_switch,
12781099013bSjsg };
12791099013bSjsg #endif
12801099013bSjsg 
12811099013bSjsg /**
12821099013bSjsg  * radeon_device_init - initialize the driver
12831099013bSjsg  *
12841099013bSjsg  * @rdev: radeon_device pointer
12855ca02815Sjsg  * @ddev: drm dev pointer
12861099013bSjsg  * @pdev: pci dev pointer
12871099013bSjsg  * @flags: driver flags
12881099013bSjsg  *
12891099013bSjsg  * Initializes the driver info and hw (all asics).
12901099013bSjsg  * Returns 0 for success or an error on failure.
12911099013bSjsg  * Called at driver startup.
12921099013bSjsg  */
12931099013bSjsg int radeon_device_init(struct radeon_device *rdev,
12947ccd5a2cSjsg 		       struct drm_device *ddev,
12957ccd5a2cSjsg 		       struct pci_dev *pdev,
12967ccd5a2cSjsg 		       uint32_t flags)
12971099013bSjsg {
12981099013bSjsg 	int r, i;
12991099013bSjsg 	int dma_bits;
13007ccd5a2cSjsg 	bool runtime = false;
13011099013bSjsg 
13021099013bSjsg 	rdev->shutdown = false;
13037ccd5a2cSjsg 	rdev->ddev = ddev;
13047ccd5a2cSjsg 	rdev->pdev = pdev;
13057ccd5a2cSjsg 	rdev->flags = flags;
13067ccd5a2cSjsg 	rdev->family = flags & RADEON_FAMILY_MASK;
13071099013bSjsg 	rdev->is_atom_bios = false;
13081099013bSjsg 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
13097ccd5a2cSjsg 	rdev->mc.gtt_size = 512 * 1024 * 1024;
13101099013bSjsg 	rdev->accel_working = false;
13111099013bSjsg 	/* set up ring ids */
13121099013bSjsg 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
13131099013bSjsg 		rdev->ring[i].idx = i;
13141099013bSjsg 	}
13157f4dd379Sjsg 	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
13161099013bSjsg 
13178a4fa980Sjsg 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1318b830ca6dSjsg 		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
13197f4dd379Sjsg 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
13208a4fa980Sjsg 	printf("%s: %s\n", rdev->self.dv_xname, radeon_family_name[rdev->family]);
13211099013bSjsg 
1322528273cbSjsg 	/* mutex initializations are all done here so we
13231099013bSjsg 	 * can call these functions again without locking issues */
13241099013bSjsg 	rw_init(&rdev->ring_lock, "ring");
1325528273cbSjsg 	rw_init(&rdev->dc_hw_i2c_mutex, "dciic");
13261099013bSjsg 	atomic_set(&rdev->ih.lock, 0);
1327528273cbSjsg 	rw_init(&rdev->gem.mutex, "gem");
1328528273cbSjsg 	rw_init(&rdev->pm.mutex, "pm");
1329528273cbSjsg 	rw_init(&rdev->gpu_clock_mutex, "gpuclk");
13307ccd5a2cSjsg 	rw_init(&rdev->srbm_mutex, "srbm");
1331f005ef32Sjsg 	rw_init(&rdev->audio.component_mutex, "racm");
13321099013bSjsg 	rw_init(&rdev->pm.mclk_lock, "mclk");
13331099013bSjsg 	rw_init(&rdev->exclusive_lock, "rdnexc");
13341099013bSjsg 	init_waitqueue_head(&rdev->irq.vblank_queue);
13351099013bSjsg 	r = radeon_gem_init(rdev);
13361099013bSjsg 	if (r)
13371099013bSjsg 		return r;
13387ccd5a2cSjsg 
13397ccd5a2cSjsg 	radeon_check_arguments(rdev);
13401099013bSjsg 	/* Adjust VM size here.
13417ccd5a2cSjsg 	 * Max GPUVM size for cayman+ is 40 bits.
13421099013bSjsg 	 */
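	/* radeon_vm_size is in GiB; << 18 converts GiB to 4 KiB pages (2^30 / 2^12 = 2^18) */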
13437ccd5a2cSjsg 	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
13441099013bSjsg 
13451099013bSjsg 	/* Set asic functions */
13461099013bSjsg 	r = radeon_asic_init(rdev);
13471099013bSjsg 	if (r)
13481099013bSjsg 		return r;
13491099013bSjsg 
13501099013bSjsg 	/* all of the newer IGP chips have an internal gart.
13511099013bSjsg 	 * However, some rs4xx report as AGP, so remove that flag here.
13521099013bSjsg 	 */
13531099013bSjsg 	if ((rdev->family >= CHIP_RS400) &&
13541099013bSjsg 	    (rdev->flags & RADEON_IS_IGP)) {
13551099013bSjsg 		rdev->flags &= ~RADEON_IS_AGP;
13561099013bSjsg 	}
13571099013bSjsg 
13581099013bSjsg 	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
13591099013bSjsg 		radeon_agp_disable(rdev);
13601099013bSjsg 	}
13611099013bSjsg 
13627ccd5a2cSjsg 	/* Set the internal MC address mask.
13637ccd5a2cSjsg 	 * This is the max address of the GPU's
13647ccd5a2cSjsg 	 * internal address space.
13657ccd5a2cSjsg 	 */
13667ccd5a2cSjsg 	if (rdev->family >= CHIP_CAYMAN)
13677ccd5a2cSjsg 		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
13687ccd5a2cSjsg 	else if (rdev->family >= CHIP_CEDAR)
13697ccd5a2cSjsg 		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
13707ccd5a2cSjsg 	else
13717ccd5a2cSjsg 		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
13727ccd5a2cSjsg 
1373c349dbc7Sjsg 	/* set DMA mask.
13741099013bSjsg 	 * PCIE - can handle 40-bits.
13751099013bSjsg 	 * IGP - can handle 40-bits.
13761099013bSjsg 	 * AGP - generally dma32 is safest.
13771099013bSjsg 	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics.
13781099013bSjsg 	 */
1379c349dbc7Sjsg 	dma_bits = 40;
13801099013bSjsg 	if (rdev->flags & RADEON_IS_AGP)
1381c349dbc7Sjsg 		dma_bits = 32;
13821099013bSjsg 	if ((rdev->flags & RADEON_IS_PCI) &&
13831099013bSjsg 	    (rdev->family <= CHIP_RS740))
1384c349dbc7Sjsg 		dma_bits = 32;
13857f4dd379Sjsg #ifdef CONFIG_PPC64
13867f4dd379Sjsg 	if (rdev->family == CHIP_CEDAR)
1387c349dbc7Sjsg 		dma_bits = 32;
13887f4dd379Sjsg #endif
13891099013bSjsg 
1390c349dbc7Sjsg 	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
13911099013bSjsg 	if (r) {
13927f4dd379Sjsg 		pr_warn("radeon: No suitable DMA available\n");
1393c349dbc7Sjsg 		return r;
13941099013bSjsg 	}
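	/* remember whether swiotlb bounce buffers may be needed for this DMA mask */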
1395c349dbc7Sjsg 	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
13961099013bSjsg 
13971099013bSjsg 	/* Registers mapping */
13981099013bSjsg 	/* TODO: block userspace mapping of io register */
13991099013bSjsg 	mtx_init(&rdev->mmio_idx_lock, IPL_TTY);
14007ccd5a2cSjsg 	mtx_init(&rdev->smc_idx_lock, IPL_TTY);
14017ccd5a2cSjsg 	mtx_init(&rdev->pll_idx_lock, IPL_TTY);
14027ccd5a2cSjsg 	mtx_init(&rdev->mc_idx_lock, IPL_TTY);
14037ccd5a2cSjsg 	mtx_init(&rdev->pcie_idx_lock, IPL_TTY);
14047ccd5a2cSjsg 	mtx_init(&rdev->pciep_idx_lock, IPL_TTY);
14057ccd5a2cSjsg 	mtx_init(&rdev->pif_idx_lock, IPL_TTY);
14067ccd5a2cSjsg 	mtx_init(&rdev->cg_idx_lock, IPL_TTY);
14077ccd5a2cSjsg 	mtx_init(&rdev->uvd_idx_lock, IPL_TTY);
14087ccd5a2cSjsg 	mtx_init(&rdev->rcu_idx_lock, IPL_TTY);
14097ccd5a2cSjsg 	mtx_init(&rdev->didt_idx_lock, IPL_TTY);
14107ccd5a2cSjsg 	mtx_init(&rdev->end_idx_lock, IPL_TTY);
14117ccd5a2cSjsg #ifdef __linux__
14127ccd5a2cSjsg 	if (rdev->family >= CHIP_BONAIRE) {
14137ccd5a2cSjsg 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
14147ccd5a2cSjsg 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
14157ccd5a2cSjsg 	} else {
14161099013bSjsg 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
14171099013bSjsg 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
14187ccd5a2cSjsg 	}
14191099013bSjsg 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
14207f4dd379Sjsg 	if (rdev->rmmio == NULL)
14211099013bSjsg 		return -ENOMEM;
14227ccd5a2cSjsg #endif
14231099013bSjsg 
14247ccd5a2cSjsg 	/* doorbell bar mapping */
14257ccd5a2cSjsg 	if (rdev->family >= CHIP_BONAIRE)
14267ccd5a2cSjsg 		radeon_doorbell_init(rdev);
14277ccd5a2cSjsg 
14281099013bSjsg 	/* io port mapping */
14290fc63394Sjsg #ifdef __linux__
14301099013bSjsg 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
14311099013bSjsg 		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
14321099013bSjsg 			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
14331099013bSjsg 			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
14341099013bSjsg 			break;
14351099013bSjsg 		}
14361099013bSjsg 	}
14371099013bSjsg 	if (rdev->rio_mem == NULL)
14381099013bSjsg 		DRM_ERROR("Unable to find PCI I/O BAR\n");
14397ccd5a2cSjsg #endif
14407ccd5a2cSjsg 
14417ccd5a2cSjsg 	if (rdev->flags & RADEON_IS_PX)
14427ccd5a2cSjsg 		radeon_device_handle_px_quirks(rdev);
14431099013bSjsg 
14441099013bSjsg 	/* if we have more than one VGA card, then disable the radeon VGA resources */
14451099013bSjsg 	/* this will fail for cards that aren't VGA class devices; just
14461099013bSjsg 	 * ignore it */
14475ca02815Sjsg 	vga_client_register(rdev->pdev, radeon_vga_set_decode);
14487ccd5a2cSjsg 
14497ccd5a2cSjsg 	if (rdev->flags & RADEON_IS_PX)
14507ccd5a2cSjsg 		runtime = true;
14517ccd5a2cSjsg #ifdef notyet
14527f4dd379Sjsg 	if (!pci_is_thunderbolt_attached(rdev->pdev))
14537f4dd379Sjsg 		vga_switcheroo_register_client(rdev->pdev,
14547f4dd379Sjsg 					       &radeon_switcheroo_ops, runtime);
1455c349dbc7Sjsg #endif
14567ccd5a2cSjsg 	if (runtime)
14577ccd5a2cSjsg 		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
14581099013bSjsg 
14591099013bSjsg 	r = radeon_init(rdev);
14601099013bSjsg 	if (r)
14617ccd5a2cSjsg 		goto failed;
14621099013bSjsg 
14635ca02815Sjsg 	radeon_gem_debugfs_init(rdev);
14641099013bSjsg 
14651099013bSjsg 	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
14661099013bSjsg 		/* Acceleration not working on AGP card, try again
14671099013bSjsg 		 * with fallback to PCI or PCIE GART
14681099013bSjsg 		 */
14691099013bSjsg 		radeon_asic_reset(rdev);
14701099013bSjsg 		radeon_fini(rdev);
14711099013bSjsg 		radeon_agp_disable(rdev);
14721099013bSjsg 		r = radeon_init(rdev);
14731099013bSjsg 		if (r)
14747ccd5a2cSjsg 			goto failed;
14751099013bSjsg 	}
14767ccd5a2cSjsg 
1477f005ef32Sjsg 	radeon_audio_component_init(rdev);
1478f005ef32Sjsg 
14797ccd5a2cSjsg 	r = radeon_ib_ring_tests(rdev);
14807ccd5a2cSjsg 	if (r)
14817ccd5a2cSjsg 		DRM_ERROR("ib ring test failed (%d).\n", r);
14827ccd5a2cSjsg 
14837ccd5a2cSjsg 	/*
14847ccd5a2cSjsg 	 * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
14857ccd5a2cSjsg 	 * after the CP ring has chewed through at least one packet. Hence we stop
14867ccd5a2cSjsg 	 * and restart DPM here, after radeon_ib_ring_tests().
14877ccd5a2cSjsg 	 */
14887ccd5a2cSjsg 	if (rdev->pm.dpm_enabled &&
14897ccd5a2cSjsg 	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
14907ccd5a2cSjsg 	    (rdev->family == CHIP_TURKS) &&
14917ccd5a2cSjsg 	    (rdev->flags & RADEON_IS_MOBILITY)) {
14927ccd5a2cSjsg 		mutex_lock(&rdev->pm.mutex);
14937ccd5a2cSjsg 		radeon_dpm_disable(rdev);
14947ccd5a2cSjsg 		radeon_dpm_enable(rdev);
14957ccd5a2cSjsg 		mutex_unlock(&rdev->pm.mutex);
14967ccd5a2cSjsg 	}
14977ccd5a2cSjsg 
14981099013bSjsg 	if ((radeon_testing & 1)) {
1499c069cd2bSjsg 		if (rdev->accel_working)
15001099013bSjsg 			radeon_test_moves(rdev);
1501c069cd2bSjsg 		else
1502c069cd2bSjsg 			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
15031099013bSjsg 	}
15041099013bSjsg 	if ((radeon_testing & 2)) {
1505c069cd2bSjsg 		if (rdev->accel_working)
15061099013bSjsg 			radeon_test_syncing(rdev);
1507c069cd2bSjsg 		else
1508c069cd2bSjsg 			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
15091099013bSjsg 	}
15101099013bSjsg 	if (radeon_benchmarking) {
1511c069cd2bSjsg 		if (rdev->accel_working)
15121099013bSjsg 			radeon_benchmark(rdev, radeon_benchmarking);
1513c069cd2bSjsg 		else
1514c069cd2bSjsg 			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
15151099013bSjsg 	}
15161099013bSjsg 	return 0;
15177ccd5a2cSjsg 
15187ccd5a2cSjsg failed:
15197f4dd379Sjsg 	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
15207f4dd379Sjsg 	if (radeon_is_px(ddev))
15217f4dd379Sjsg 		pm_runtime_put_noidle(ddev->dev);
15227ccd5a2cSjsg 	if (runtime)
15237ccd5a2cSjsg 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
15247ccd5a2cSjsg 	return r;
15251099013bSjsg }
15261099013bSjsg 
15271099013bSjsg /**
15281099013bSjsg  * radeon_device_fini - tear down the driver
15291099013bSjsg  *
15301099013bSjsg  * @rdev: radeon_device pointer
15311099013bSjsg  *
15321099013bSjsg  * Tear down the driver info (all asics).
15331099013bSjsg  * Called at driver shutdown.
15341099013bSjsg  */
15351099013bSjsg void radeon_device_fini(struct radeon_device *rdev)
15361099013bSjsg {
15371099013bSjsg 	DRM_INFO("radeon: finishing device.\n");
15381099013bSjsg 	rdev->shutdown = true;
15391099013bSjsg 	/* evict vram memory */
15401099013bSjsg 	radeon_bo_evict_vram(rdev);
1541f005ef32Sjsg 	radeon_audio_component_fini(rdev);
15421099013bSjsg 	radeon_fini(rdev);
15437f4dd379Sjsg 	if (!pci_is_thunderbolt_attached(rdev->pdev))
15441099013bSjsg 		vga_switcheroo_unregister_client(rdev->pdev);
15457ccd5a2cSjsg 	if (rdev->flags & RADEON_IS_PX)
15467ccd5a2cSjsg 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
15475ca02815Sjsg 	vga_client_unregister(rdev->pdev);
15487ccd5a2cSjsg #ifdef __linux__
15491099013bSjsg 	if (rdev->rio_mem)
15501099013bSjsg 		pci_iounmap(rdev->pdev, rdev->rio_mem);
15511099013bSjsg 	rdev->rio_mem = NULL;
15521099013bSjsg 	iounmap(rdev->rmmio);
15537ccd5a2cSjsg #else
15547ccd5a2cSjsg 	if (rdev->rio_mem_size > 0)
15557ccd5a2cSjsg 		bus_space_unmap(rdev->iot, rdev->rio_mem, rdev->rio_mem_size);
15567ccd5a2cSjsg 	rdev->rio_mem_size = 0;
15577ccd5a2cSjsg 
15587ccd5a2cSjsg 	if (rdev->rmmio_size > 0)
15597ccd5a2cSjsg 		bus_space_unmap(rdev->memt, rdev->rmmio_bsh, rdev->rmmio_size);
15607ccd5a2cSjsg 	rdev->rmmio_size = 0;
1561f3eef2b6Sderaadt #endif
1562d6bc221bSkettenis 	rdev->rmmio = NULL;
15637ccd5a2cSjsg 	if (rdev->family >= CHIP_BONAIRE)
15647ccd5a2cSjsg 		radeon_doorbell_fini(rdev);
15651099013bSjsg }
15661099013bSjsg 
15677ccd5a2cSjsg 
15681099013bSjsg /*
15691099013bSjsg  * Suspend & resume.
15701099013bSjsg  */
15715ca02815Sjsg /*
15721099013bSjsg  * radeon_suspend_kms - initiate device suspend
15731099013bSjsg  *
15741099013bSjsg  * Puts the hw in the suspend state (all asics).
15751099013bSjsg  * Returns 0 for success or an error on failure.
15761099013bSjsg  * Called at driver suspend.
15771099013bSjsg  */
15787f4dd379Sjsg int radeon_suspend_kms(struct drm_device *dev, bool suspend,
15797f4dd379Sjsg 		       bool fbcon, bool freeze)
15801099013bSjsg {
15811099013bSjsg 	struct radeon_device *rdev;
15825ca02815Sjsg 	struct pci_dev *pdev;
15831099013bSjsg 	struct drm_crtc *crtc;
15841099013bSjsg 	struct drm_connector *connector;
15851099013bSjsg 	int i, r;
15861099013bSjsg 
15871099013bSjsg 	if (dev == NULL || dev->dev_private == NULL) {
15881099013bSjsg 		return -ENODEV;
15891099013bSjsg 	}
15907ccd5a2cSjsg 
15911099013bSjsg 	rdev = dev->dev_private;
15925ca02815Sjsg #ifdef __linux__
15935ca02815Sjsg 	pdev = to_pci_dev(dev->dev);
15945ca02815Sjsg #else
15955ca02815Sjsg 	pdev = dev->pdev;
15965ca02815Sjsg #endif
15975ca02815Sjsg 
1598aebd09ecSderaadt 	if (rdev->shutdown)
1599aebd09ecSderaadt 		return 0;
16001099013bSjsg 
16011099013bSjsg #ifdef notyet
16021099013bSjsg 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
16031099013bSjsg 		return 0;
16041099013bSjsg #endif
16051099013bSjsg 
16061099013bSjsg 	drm_kms_helper_poll_disable(dev);
16071099013bSjsg 
16087ccd5a2cSjsg 	drm_modeset_lock_all(dev);
16091099013bSjsg 	/* turn off display hw */
16101099013bSjsg 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
16111099013bSjsg 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
16121099013bSjsg 	}
16137ccd5a2cSjsg 	drm_modeset_unlock_all(dev);
16141099013bSjsg 
16157ccd5a2cSjsg 	/* unpin the front buffers and cursors */
16161099013bSjsg 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
16177ccd5a2cSjsg 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
16187f4dd379Sjsg 		struct drm_framebuffer *fb = crtc->primary->fb;
16191099013bSjsg 		struct radeon_bo *robj;
16201099013bSjsg 
16217ccd5a2cSjsg 		if (radeon_crtc->cursor_bo) {
16227ccd5a2cSjsg 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
16237ccd5a2cSjsg 			r = radeon_bo_reserve(robj, false);
16247ccd5a2cSjsg 			if (r == 0) {
16257ccd5a2cSjsg 				radeon_bo_unpin(robj);
16267ccd5a2cSjsg 				radeon_bo_unreserve(robj);
16277ccd5a2cSjsg 			}
16287ccd5a2cSjsg 		}
16297ccd5a2cSjsg 
16307f4dd379Sjsg 		if (fb == NULL || fb->obj[0] == NULL) {
16311099013bSjsg 			continue;
16321099013bSjsg 		}
16337f4dd379Sjsg 		robj = gem_to_radeon_bo(fb->obj[0]);
16341099013bSjsg 		/* don't unpin kernel fb objects */
16351099013bSjsg 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
16361099013bSjsg 			r = radeon_bo_reserve(robj, false);
16371099013bSjsg 			if (r == 0) {
16381099013bSjsg 				radeon_bo_unpin(robj);
16391099013bSjsg 				radeon_bo_unreserve(robj);
16401099013bSjsg 			}
16411099013bSjsg 		}
16421099013bSjsg 	}
16431099013bSjsg 	/* evict vram memory */
16441099013bSjsg 	radeon_bo_evict_vram(rdev);
16451099013bSjsg 
16461099013bSjsg 	/* wait for gpu to finish processing current batch */
16471099013bSjsg 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
16487ccd5a2cSjsg 		r = radeon_fence_wait_empty(rdev, i);
16491099013bSjsg 		if (r) {
16501099013bSjsg 			/* delay GPU reset to resume */
16517ccd5a2cSjsg 			radeon_fence_driver_force_completion(rdev, i);
1652c9bdf9e2Sjsg 		} else {
1653c9bdf9e2Sjsg 			/* finish executing delayed work */
1654c9bdf9e2Sjsg 			flush_delayed_work(&rdev->fence_drv[i].lockup_work);
16551099013bSjsg 		}
16561099013bSjsg 	}
16571099013bSjsg 
16581099013bSjsg 	radeon_save_bios_scratch_regs(rdev);
16591099013bSjsg 
16601099013bSjsg 	radeon_suspend(rdev);
16611099013bSjsg 	radeon_hpd_fini(rdev);
16627f4dd379Sjsg 	/* evict remaining vram memory.
16637f4dd379Sjsg 	 * This second call to evict vram is to evict the gart page table
16647f4dd379Sjsg 	 * using the CPU.
16657f4dd379Sjsg 	 */
16661099013bSjsg 	radeon_bo_evict_vram(rdev);
16671099013bSjsg 
16681099013bSjsg 	radeon_agp_suspend(rdev);
16691099013bSjsg 
16705ca02815Sjsg 	pci_save_state(pdev);
16717f4dd379Sjsg 	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
16727f4dd379Sjsg 		rdev->asic->asic_reset(rdev, true);
16735ca02815Sjsg 		pci_restore_state(pdev);
16747f4dd379Sjsg 	} else if (suspend) {
16751099013bSjsg 		/* Shut down the device */
16765ca02815Sjsg 		pci_disable_device(pdev);
16775ca02815Sjsg 		pci_set_power_state(pdev, PCI_D3hot);
16781099013bSjsg 	}
16797ccd5a2cSjsg 
16807ccd5a2cSjsg 	if (fbcon) {
16811099013bSjsg 		console_lock();
16821099013bSjsg 		radeon_fbdev_set_suspend(rdev, 1);
16831099013bSjsg 		console_unlock();
16847ccd5a2cSjsg 	}
16851099013bSjsg 	return 0;
16861099013bSjsg }
16871099013bSjsg 
16885ca02815Sjsg /*
16891099013bSjsg  * radeon_resume_kms - initiate device resume
16901099013bSjsg  *
16911099013bSjsg  * Bring the hw back to operating state (all asics).
16921099013bSjsg  * Returns 0 for success or an error on failure.
16931099013bSjsg  * Called at driver resume.
16941099013bSjsg  */
16957ccd5a2cSjsg int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
16961099013bSjsg {
16971099013bSjsg 	struct drm_connector *connector;
16981099013bSjsg 	struct radeon_device *rdev = dev->dev_private;
16995ca02815Sjsg #ifdef __linux__
17005ca02815Sjsg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
17015ca02815Sjsg #else
17025ca02815Sjsg 	struct pci_dev *pdev = dev->pdev;
17035ca02815Sjsg #endif
17047ccd5a2cSjsg 	struct drm_crtc *crtc;
17051099013bSjsg 	int r;
17061099013bSjsg 
17071099013bSjsg #ifdef notyet
17081099013bSjsg 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
17091099013bSjsg 		return 0;
17101099013bSjsg #endif
17111099013bSjsg 
17127ccd5a2cSjsg 	if (fbcon) {
17131099013bSjsg 		console_lock();
17147ccd5a2cSjsg 	}
17157ccd5a2cSjsg 	if (resume) {
17165ca02815Sjsg 		pci_set_power_state(pdev, PCI_D0);
17175ca02815Sjsg 		pci_restore_state(pdev);
17185ca02815Sjsg 		if (pci_enable_device(pdev)) {
17197ccd5a2cSjsg 			if (fbcon)
17201099013bSjsg 				console_unlock();
17211099013bSjsg 			return -1;
17221099013bSjsg 		}
17237ccd5a2cSjsg 	}
17241099013bSjsg 	/* resume AGP if in use */
17251099013bSjsg 	radeon_agp_resume(rdev);
17261099013bSjsg 	radeon_resume(rdev);
17271099013bSjsg 
17281099013bSjsg 	r = radeon_ib_ring_tests(rdev);
17291099013bSjsg 	if (r)
17301099013bSjsg 		DRM_ERROR("ib ring test failed (%d).\n", r);
17311099013bSjsg 
17327ccd5a2cSjsg 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
17337ccd5a2cSjsg 		/* do dpm late init */
17347ccd5a2cSjsg 		r = radeon_pm_late_init(rdev);
17357ccd5a2cSjsg 		if (r) {
17367ccd5a2cSjsg 			rdev->pm.dpm_enabled = false;
17377ccd5a2cSjsg 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
17387ccd5a2cSjsg 		}
17397ccd5a2cSjsg 	} else {
17407ccd5a2cSjsg 		/* resume old pm late */
17411099013bSjsg 		radeon_pm_resume(rdev);
17427ccd5a2cSjsg 	}
17437ccd5a2cSjsg 
17441099013bSjsg 	radeon_restore_bios_scratch_regs(rdev);
17451099013bSjsg 
17467ccd5a2cSjsg 	/* pin cursors */
17477ccd5a2cSjsg 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
17487ccd5a2cSjsg 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
17497ccd5a2cSjsg 
17507ccd5a2cSjsg 		if (radeon_crtc->cursor_bo) {
17517ccd5a2cSjsg 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
17527ccd5a2cSjsg 			r = radeon_bo_reserve(robj, false);
17537ccd5a2cSjsg 			if (r == 0) {
17547ccd5a2cSjsg 				/* Only 27 bit offset for legacy cursor */
17557ccd5a2cSjsg 				r = radeon_bo_pin_restricted(robj,
17567ccd5a2cSjsg 							     RADEON_GEM_DOMAIN_VRAM,
17577ccd5a2cSjsg 							     ASIC_IS_AVIVO(rdev) ?
17587ccd5a2cSjsg 							     0 : 1 << 27,
17597ccd5a2cSjsg 							     &radeon_crtc->cursor_addr);
17607ccd5a2cSjsg 				if (r != 0)
17617ccd5a2cSjsg 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
17627ccd5a2cSjsg 				radeon_bo_unreserve(robj);
17637ccd5a2cSjsg 			}
17647ccd5a2cSjsg 		}
17657ccd5a2cSjsg 	}
17661099013bSjsg 
17671099013bSjsg 	/* init dig PHYs, disp eng pll */
17681099013bSjsg 	if (rdev->is_atom_bios) {
17691099013bSjsg 		radeon_atom_encoder_init(rdev);
17701099013bSjsg 		radeon_atom_disp_eng_pll_init(rdev);
17711099013bSjsg 		/* turn on the BL */
17721099013bSjsg 		if (rdev->mode_info.bl_encoder) {
17731099013bSjsg 			u8 bl_level = radeon_get_backlight_level(rdev,
17741099013bSjsg 								 rdev->mode_info.bl_encoder);
17751099013bSjsg 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
17761099013bSjsg 						   bl_level);
17771099013bSjsg 		}
17781099013bSjsg 	}
17791099013bSjsg 	/* reset hpd state */
17801099013bSjsg 	radeon_hpd_init(rdev);
17811099013bSjsg 	/* blat the mode back in */
17827ccd5a2cSjsg 	if (fbcon) {
17831099013bSjsg 		drm_helper_resume_force_mode(dev);
17841099013bSjsg 		/* turn on display hw */
17857ccd5a2cSjsg 		drm_modeset_lock_all(dev);
17861099013bSjsg 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
17871099013bSjsg 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
17881099013bSjsg 		}
17897ccd5a2cSjsg 		drm_modeset_unlock_all(dev);
17907ccd5a2cSjsg 	}
17911099013bSjsg 
17921099013bSjsg 	drm_kms_helper_poll_enable(dev);
17937ccd5a2cSjsg 
17947ccd5a2cSjsg 	/* set the power state here in case we are a PX system or headless */
17957ccd5a2cSjsg 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
17967ccd5a2cSjsg 		radeon_pm_compute_clocks(rdev);
17977ccd5a2cSjsg 
17987ccd5a2cSjsg 	if (fbcon) {
17997ccd5a2cSjsg 		radeon_fbdev_set_suspend(rdev, 0);
18007ccd5a2cSjsg 		console_unlock();
18017ccd5a2cSjsg 	}
18027ccd5a2cSjsg 
18031099013bSjsg 	return 0;
18041099013bSjsg }
18051099013bSjsg 
18061099013bSjsg /**
18071099013bSjsg  * radeon_gpu_reset - reset the asic
18081099013bSjsg  *
18091099013bSjsg  * @rdev: radeon device pointer
18101099013bSjsg  *
18111099013bSjsg  * Attempt to reset the GPU if it has hung (all asics).
18121099013bSjsg  * Returns 0 for success or an error on failure.
18131099013bSjsg  */
18141099013bSjsg int radeon_gpu_reset(struct radeon_device *rdev)
18151099013bSjsg {
18161099013bSjsg 	unsigned ring_sizes[RADEON_NUM_RINGS];
18171099013bSjsg 	uint32_t *ring_data[RADEON_NUM_RINGS];
18181099013bSjsg 
18191099013bSjsg 	bool saved = false;
18201099013bSjsg 
18211099013bSjsg 	int i, r;
18221099013bSjsg 
1823528273cbSjsg 	down_write(&rdev->exclusive_lock);
18247ccd5a2cSjsg 
18257ccd5a2cSjsg 	if (!rdev->needs_reset) {
18267ccd5a2cSjsg 		up_write(&rdev->exclusive_lock);
18277ccd5a2cSjsg 		return 0;
18287ccd5a2cSjsg 	}
18297ccd5a2cSjsg 
18307ccd5a2cSjsg 	atomic_inc(&rdev->gpu_reset_counter);
18317ccd5a2cSjsg 
18321099013bSjsg 	radeon_save_bios_scratch_regs(rdev);
18331099013bSjsg 	radeon_suspend(rdev);
18347ccd5a2cSjsg 	radeon_hpd_fini(rdev);
18351099013bSjsg 
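	/* save unprocessed commands from each ring so they can be replayed after the reset */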
18361099013bSjsg 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
18371099013bSjsg 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
18381099013bSjsg 						   &ring_data[i]);
18391099013bSjsg 		if (ring_sizes[i]) {
18401099013bSjsg 			saved = true;
18411099013bSjsg 			dev_info(rdev->dev, "Saved %d dwords of commands "
18421099013bSjsg 				 "on ring %d.\n", ring_sizes[i], i);
18431099013bSjsg 		}
18441099013bSjsg 	}
18451099013bSjsg 
18461099013bSjsg 	r = radeon_asic_reset(rdev);
18471099013bSjsg 	if (!r) {
18481099013bSjsg 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
18491099013bSjsg 		radeon_resume(rdev);
18501099013bSjsg 	}
18511099013bSjsg 
18521099013bSjsg 	radeon_restore_bios_scratch_regs(rdev);
18531099013bSjsg 
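	/* replay the saved commands if the reset succeeded; otherwise force fence completion and free the backups */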
18541099013bSjsg 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
18557ccd5a2cSjsg 		if (!r && ring_data[i]) {
18561099013bSjsg 			radeon_ring_restore(rdev, &rdev->ring[i],
18571099013bSjsg 					    ring_sizes[i], ring_data[i]);
18581099013bSjsg 		} else {
18597ccd5a2cSjsg 			radeon_fence_driver_force_completion(rdev, i);
1860de5631a0Sjsg 			kfree(ring_data[i]);
18611099013bSjsg 		}
18621099013bSjsg 	}
18631099013bSjsg 
18647ccd5a2cSjsg 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
18657ccd5a2cSjsg 		/* do dpm late init */
18667ccd5a2cSjsg 		r = radeon_pm_late_init(rdev);
18677ccd5a2cSjsg 		if (r) {
18687ccd5a2cSjsg 			rdev->pm.dpm_enabled = false;
18697ccd5a2cSjsg 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
18707ccd5a2cSjsg 		}
18717ccd5a2cSjsg 	} else {
18727ccd5a2cSjsg 		/* resume old pm late */
18737ccd5a2cSjsg 		radeon_pm_resume(rdev);
18747ccd5a2cSjsg 	}
18757ccd5a2cSjsg 
18767ccd5a2cSjsg 	/* init dig PHYs, disp eng pll */
18777ccd5a2cSjsg 	if (rdev->is_atom_bios) {
18787ccd5a2cSjsg 		radeon_atom_encoder_init(rdev);
18797ccd5a2cSjsg 		radeon_atom_disp_eng_pll_init(rdev);
18807ccd5a2cSjsg 		/* turn on the BL */
18817ccd5a2cSjsg 		if (rdev->mode_info.bl_encoder) {
18827ccd5a2cSjsg 			u8 bl_level = radeon_get_backlight_level(rdev,
18837ccd5a2cSjsg 								 rdev->mode_info.bl_encoder);
18847ccd5a2cSjsg 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
18857ccd5a2cSjsg 						   bl_level);
18867ccd5a2cSjsg 		}
18877ccd5a2cSjsg 	}
18887ccd5a2cSjsg 	/* reset hpd state */
18897ccd5a2cSjsg 	radeon_hpd_init(rdev);
189039214a00Sderaadt 
18917ccd5a2cSjsg 	rdev->in_reset = true;
18927ccd5a2cSjsg 	rdev->needs_reset = false;
18937ccd5a2cSjsg 
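	/* downgrade to a read lock: writers stay excluded while readers may proceed during late recovery */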
18947ccd5a2cSjsg 	downgrade_write(&rdev->exclusive_lock);
18957ccd5a2cSjsg 
1896*33a3edb1Sjsg 	drm_helper_resume_force_mode(rdev_to_drm(rdev));
18977ccd5a2cSjsg 
18987ccd5a2cSjsg 	/* set the power state here in case we are a PX system or headless */
18997ccd5a2cSjsg 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
19007ccd5a2cSjsg 		radeon_pm_compute_clocks(rdev);
19017ccd5a2cSjsg 
19027ccd5a2cSjsg 	if (!r) {
19037ccd5a2cSjsg 		r = radeon_ib_ring_tests(rdev);
19047ccd5a2cSjsg 		if (r && saved)
19057ccd5a2cSjsg 			r = -EAGAIN;
19067ccd5a2cSjsg 	} else {
19071099013bSjsg 		/* bad news, how do we tell userspace? */
19081099013bSjsg 		dev_info(rdev->dev, "GPU reset failed\n");
19091099013bSjsg 	}
19101099013bSjsg 
19117ccd5a2cSjsg 	rdev->needs_reset = r == -EAGAIN;
19127ccd5a2cSjsg 	rdev->in_reset = false;
19137ccd5a2cSjsg 
19147ccd5a2cSjsg 	up_read(&rdev->exclusive_lock);
19151099013bSjsg 	return r;
19161099013bSjsg }
1917