/*	$NetBSD: dove.c,v 1.3 2021/09/30 10:19:52 skrll Exp $	*/
/*
 * Copyright (c) 2016 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dove.c,v 1.3 2021/09/30 10:19:52 skrll Exp $");

#define _INTR_PRIVATE

#include "mvsocgpp.h"
#include "mvsocpmu.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/errno.h>

#include <machine/intr.h>

#include <arm/cpufunc.h>
#include <arm/pic/picvar.h>

#include <arm/marvell/mvsocreg.h>
#include <arm/marvell/mvsocvar.h>
#include <arm/marvell/mvsocpmuvar.h>
#include <arm/marvell/dovereg.h>

#include <dev/marvell/marvellreg.h>


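/*
 * Register accessors.  read_dbreg/write_dbreg alias the Mbus-L to Mbus
 * bridge register helpers.  PMU registers are accessed through the
 * bus_space mapping owned by the mvsocpmu driver when it is configured;
 * otherwise they are read directly through pmu_base, a virtual address
 * that dove_bootstrap() points at the PMU register window.
 */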
#define read_dbreg	read_mlmbreg
#define write_dbreg	write_mlmbreg
#if NMVSOCPMU > 0
#define READ_PMUREG(sc, o)	\
		bus_space_read_4((sc)->sc_iot, (sc)->sc_pmch, (o))
#define WRITE_PMUREG(sc, o, v)	\
		bus_space_write_4((sc)->sc_iot, (sc)->sc_pmch, (o), (v))
#else
vaddr_t pmu_base = -1;
#define READ_PMUREG(sc, o)	le32toh(*(volatile uint32_t *)(pmu_base + (o)))
#endif

static void dove_intr_init(void);

static void dove_pic_unblock_irqs(struct pic_softc *, size_t, uint32_t);
static void dove_pic_block_irqs(struct pic_softc *, size_t, uint32_t);
static void dove_pic_establish_irq(struct pic_softc *, struct intrsource *);
static void dove_pic_source_name(struct pic_softc *, int, char *, size_t);

static int dove_find_pending_irqs(void);

static void dove_getclks(bus_addr_t);
static int dove_clkgating(struct marvell_attach_args *);

#if NMVSOCPMU > 0
struct dove_pmu_softc {
	struct mvsocpmu_softc sc_mvsocpmu_sc;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_pmch;	/* Power Management Core handle */
	bus_space_handle_t sc_pmh;	/* Power Management handle */

	int sc_xpratio;
	int sc_dpratio;
};
static int dove_pmu_match(device_t, struct cfdata *, void *);
static void dove_pmu_attach(device_t, device_t, void *);
static int dove_pmu_intr(void *);
static int dove_tm_val2uc(int);
static int dove_tm_uc2val(int);
static int dove_dfs_slow(struct dove_pmu_softc *, bool);

CFATTACH_DECL_NEW(mvsocpmu, sizeof(struct dove_pmu_softc),
    dove_pmu_match, dove_pmu_attach, NULL, NULL);
#endif


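/*
 * Interrupt source names, indexed by IRQ number: 0-31 correspond to bits in
 * the Main Interrupt Cause Register, 32-63 to bits in the Second Main
 * Interrupt Cause Register.  Used by dove_pic_source_name().
 */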
static const char * const sources[64] = {
    "Bridge(0)",       "Host2CPUDoorbell(1)","CPU2HostDoorbell(2)","NF(3)",
    "PDMA(4)",         "SPI1(5)",         "SPI0(6)",         "UART0(7)",
    "UART1(8)",        "UART2(9)",        "UART3(10)",       "TWSI(11)",
    "GPIO7_0(12)",     "GPIO15_8(13)",    "GPIO23_16(14)",   "PEX0_Err(15)",
    "PEX0_INT(16)",    "PEX1_Err(17)",    "PEX1_INT(18)",    "Audio0_INT(19)",
    "Audio0_Err(20)",  "Audio1_INT(21)",  "Audio1_Err(22)",  "USBBr(23)",
    "USB0Cnt(24)",     "USB1Cnt(25)",     "GbERx(26)",       "GbETx(27)",
    "GbEMisc(28)",     "GbESum(29)",      "GbEErr(30)",      "SecurityInt(31)",

    "AC97(32)",        "PMU(33)",         "CAM(34)",         "SD0(35)",
    "SD1(36)",         "SD0_wakeup_Int(37)","SD1_wakeup_Int(38)","XOR0_DMA0(39)",
    "XOR0_DMA1(40)",   "XOR0Err(41)",     "XOR1_DMA0(42)",   "XOR1_DMA1(43)",
    "XOR1Err(44)",     "IRE_DCON(45)",    "LCD1(46)",        "LCD0(47)",
    "GPU(48)",         "Reserved(49)",    "Reserved_18(50)", "Vmeta(51)",
    "Reserved_20(52)", "Reserved_21(53)", "SSPTimer(54)",    "SSPInt(55)",
    "MemoryErr(56)",   "DwnstrmExclTrn(57)","UpstrmAddrErr(58)","SecurityErr(59)",
    "GPIO_31_24(60)",  "HighGPIO(61)",    "SATAInt(62)",     "Reserved_31(63)"
};

static struct pic_ops dove_picops = {
	.pic_unblock_irqs = dove_pic_unblock_irqs,
	.pic_block_irqs = dove_pic_block_irqs,
	.pic_establish_irq = dove_pic_establish_irq,
	.pic_source_name = dove_pic_source_name,
};
static struct pic_softc dove_pic = {
	.pic_ops = &dove_picops,
	.pic_maxsources = 64,
	.pic_name = "dove",
};

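/*
 * Clock gating table: each entry maps a device register-window offset to the
 * bit(s) that gate its clock in the PMU Clock Gating Control Register
 * (DOVE_PMU_CGCR).  dove_clkgating() looks the attaching device up here to
 * decide whether its clock is currently enabled.
 */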
static struct {
	bus_size_t offset;
	uint32_t bits;
} clkgatings[] = {
	{ DOVE_USB0_BASE,	(1 << 0) },
	{ DOVE_USB1_BASE,	(1 << 1) },
	{ DOVE_GBE_BASE,	(1 << 2) | (1 << 30) },
	{ DOVE_SATAHC_BASE,	(1 << 3) },
	{ MVSOC_PEX_BASE,	(1 << 4) },
	{ DOVE_PEX1_BASE,	(1 << 5) },
	{ DOVE_SDHC0_BASE,	(1 << 8) },
	{ DOVE_SDHC1_BASE,	(1 << 9) },
	{ DOVE_NAND_BASE,	(1 << 10) },
	{ DOVE_CAMERA_BASE,	(1 << 11) },
	{ DOVE_AUDIO0_BASE,	(1 << 12) },
	{ DOVE_AUDIO1_BASE,	(1 << 13) },
	{ DOVE_CESA_BASE,	(1 << 15) },
#if 0
	{ PDMA, (1 << 22) },	/* PdmaEnClock */
#endif
	{ DOVE_XORE_BASE,	(1 << 23) | (1 << 24) },
};


/*
 * dove_bootstrap:
 *
 *	Initialize the rest of the Dove dependencies, making it ready to
 *	handle interrupts from devices, and set up the clocks and the PMU.
 */
void
dove_bootstrap(bus_addr_t iobase)
{

	/* disable all interrupts */
	write_dbreg(DOVE_DB_MIRQIMR, 0);
	write_dbreg(DOVE_DB_SMIRQIMR, 0);

	/* disable all bridge interrupts */
	write_mlmbreg(MVSOC_MLMB_MLMBIMR, 0);

	mvsoc_intr_init = dove_intr_init;

#if NMVSOCGPP > 0
	/*
	 * 64 General Purpose Port I/O (GPIO [63:0]) and
	 * an additional eight General Purpose Outputs (GPO [71:64]).
	 */
	gpp_npins = 72;
	gpp_irqbase = 96;	/* Main(32) + Second Main(32) + Bridge(32) */
#endif

	dove_getclks(iobase);

	mvsoc_clkgating = dove_clkgating;
#if NMVSOCPMU == 0
	pmu_base = iobase + DOVE_PMU_BASE;
#endif
}

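/*
 * Register the Dove interrupt controller with the PIC framework: the 64
 * main/second-main sources start at IRQ 0, and the bridge sources are
 * cascaded behind them at IRQ base 64 via the DOVE_IRQ_BRIDGE summary
 * interrupt.
 */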
static void
dove_intr_init(void)
{
	extern struct pic_softc mvsoc_bridge_pic;
	void *ih __diagused;

	pic_add(&dove_pic, 0);

	pic_add(&mvsoc_bridge_pic, 64);
	ih = intr_establish(DOVE_IRQ_BRIDGE, IPL_HIGH, IST_LEVEL_HIGH,
	    pic_handle_intr, &mvsoc_bridge_pic);
	KASSERT(ih != NULL);

	find_pending_irqs = dove_find_pending_irqs;
}

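/*
 * Unblock/block IRQs: the mask register is selected from irqbase (0 for the
 * Main Interrupt Mask Register, 32 for the Second Main one); the bits set in
 * irq_mask are then enabled or disabled in that register.
 */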
/* ARGSUSED */
static void
dove_pic_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t irq_mask)
{
	const size_t reg = DOVE_DB_MIRQIMR
	   + irqbase * (DOVE_DB_SMIRQIMR - DOVE_DB_MIRQIMR) / 32;

	KASSERT(irqbase < 64);
	write_dbreg(reg, read_dbreg(reg) | irq_mask);
}

/* ARGSUSED */
static void
dove_pic_block_irqs(struct pic_softc *pic, size_t irqbase,
			uint32_t irq_mask)
{
	const size_t reg = DOVE_DB_MIRQIMR
	   + irqbase * (DOVE_DB_SMIRQIMR - DOVE_DB_MIRQIMR) / 32;

	KASSERT(irqbase < 64);
	write_dbreg(reg, read_dbreg(reg) & ~irq_mask);
}

/* ARGSUSED */
static void
dove_pic_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	/* Nothing */
}

static void
dove_pic_source_name(struct pic_softc *pic, int irq, char *buf, size_t len)
{

	strlcpy(buf, sources[pic->pic_irqbase + irq], len);
}

/*
 * Called with interrupts disabled
 */
static int
dove_find_pending_irqs(void)
{
	int ipl = 0;

	uint32_t cause = read_dbreg(DOVE_DB_MICR);
	uint32_t pending = read_dbreg(DOVE_DB_MIRQIMR);
	pending &= cause;
	if (pending)
		ipl |= pic_mark_pending_sources(&dove_pic, 0, pending);

	uint32_t cause2 = read_dbreg(DOVE_DB_SMICR);
	uint32_t pending2 = read_dbreg(DOVE_DB_SMIRQIMR);
	pending2 &= cause2;
	if (pending2)
		ipl |= pic_mark_pending_sources(&dove_pic, 32, pending2);

	return ipl;
}

/*
 * Clock functions
 */

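/*
 * Decode the Sample-at-Reset 0 register to obtain mvTclk, mvPclk (the CPU
 * clock) and mvSysclk, which is derived from mvPclk through a divider
 * encoded in bits 15:12 (e.g. a field value of 0x2000 selects mvPclk / 2).
 */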
static void
dove_getclks(bus_addr_t iobase)
{
	uint32_t val;

#define MHz	* 1000 * 1000

	val = le32toh(*(volatile uint32_t *)(iobase + DOVE_MISC_BASE +
	    DOVE_MISC_SAMPLE_AT_RESET0));

	switch (val & 0x01800000) {
	case 0x00000000: mvTclk = 166 MHz; break;
	case 0x00800000: mvTclk = 125 MHz; break;
	default:
		panic("unknown mvTclk\n");
	}

	switch (val & 0x000001e0) {
	case 0x000000a0: mvPclk = 1000 MHz; break;
	case 0x000000c0: mvPclk =  933 MHz; break;
	case 0x000000e0: mvPclk =  933 MHz; break;
	case 0x00000100: mvPclk =  800 MHz; break;
	case 0x00000120: mvPclk =  800 MHz; break;
	case 0x00000140: mvPclk =  800 MHz; break;
	case 0x00000160: mvPclk = 1067 MHz; break;
	case 0x00000180: mvPclk =  667 MHz; break;
	case 0x000001a0: mvPclk =  533 MHz; break;
	case 0x000001c0: mvPclk =  400 MHz; break;
	case 0x000001e0: mvPclk =  333 MHz; break;
	default:
		panic("unknown mvPclk\n");
	}

	switch (val & 0x0000f000) {
	case 0x00000000: mvSysclk = mvPclk /  1; break;
	case 0x00002000: mvSysclk = mvPclk /  2; break;
	case 0x00004000: mvSysclk = mvPclk /  3; break;
	case 0x00006000: mvSysclk = mvPclk /  4; break;
	case 0x00008000: mvSysclk = mvPclk /  5; break;
	case 0x0000a000: mvSysclk = mvPclk /  6; break;
	case 0x0000c000: mvSysclk = mvPclk /  7; break;
	case 0x0000e000: mvSysclk = mvPclk /  8; break;
	case 0x0000f000: mvSysclk = mvPclk / 10; break;
	}

#undef MHz

}

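/*
 * Clock-gating query used via the mvsoc_clkgating hook: returns 1 if the
 * clock for the device described by mva is gated off, 0 if it is enabled or
 * if the device is not subject to clock gating.
 */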
static int
dove_clkgating(struct marvell_attach_args *mva)
{
	uint32_t val;
	int i;

#if NMVSOCPMU > 0
	struct dove_pmu_softc *pmu =
	    device_private(device_find_by_xname("mvsocpmu0"));

	if (pmu == NULL)
		return 0;
#else
	KASSERT(pmu_base != -1);
#endif

	if (strcmp(mva->mva_name, "mvsocpmu") == 0)
		return 0;

	for (i = 0; i < __arraycount(clkgatings); i++) {
		if (clkgatings[i].offset == mva->mva_offset) {
			val = READ_PMUREG(pmu, DOVE_PMU_CGCR);
			if ((val & clkgatings[i].bits) == clkgatings[i].bits)
				/* Clock enabled */
				return 0;
			return 1;
		}
	}
	/* Clock gating not supported for this device */
	return 0;
}

#if NMVSOCPMU > 0
static int
dove_pmu_match(device_t parent, struct cfdata *match, void *aux)
{
	struct marvell_attach_args *mva = aux;

	if (mvsocpmu_match(parent, match, aux) == 0)
		return 0;

	if (mva->mva_offset == MVA_OFFSET_DEFAULT ||
	    mva->mva_irq == MVA_IRQ_DEFAULT)
		return 0;

	mva->mva_size = DOVE_PMU_SIZE;
	return 1;
}

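/*
 * Attach the Dove PMU: map the PMU core, PMU and PMU SRAM register windows,
 * program and soft-reset the thermal diode controller, record the turbo (XP)
 * and slow (DP) CPU/L2 clock ratios for later DFS requests, hand off to the
 * common mvsocpmu attachment, and finally enable the thermal-overheat
 * interrupt.
 */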
static void
dove_pmu_attach(device_t parent, device_t self, void *aux)
{
	struct dove_pmu_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	uint32_t tdc0, cpucdc0;

	sc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(sc->sc_iot, mva->mva_ioh,
	    mva->mva_offset, mva->mva_size, &sc->sc_pmch))
		panic("%s: Cannot map core registers", device_xname(self));
	if (bus_space_subregion(sc->sc_iot, mva->mva_ioh,
	    mva->mva_offset + (DOVE_PMU_BASE2 - DOVE_PMU_BASE),
	    DOVE_PMU_SIZE, &sc->sc_pmh))
		panic("%s: Cannot map registers", device_xname(self));
	if (bus_space_subregion(sc->sc_iot, mva->mva_ioh,
	    mva->mva_offset + (DOVE_PMU_SRAM_BASE - DOVE_PMU_BASE),
	    DOVE_PMU_SRAM_SIZE, &sc->sc_pmh))
		panic("%s: Cannot map SRAM", device_xname(self));

	tdc0 = READ_PMUREG(sc, DOVE_PMU_TDC0R);
	tdc0 &= ~(DOVE_PMU_TDC0R_THERMAVGNUM_MASK |
	    DOVE_PMU_TDC0R_THERMREFCALCOUNT_MASK |
	    DOVE_PMU_TDC0R_THERMSELVCAL_MASK);
	tdc0 |= (DOVE_PMU_TDC0R_THERMAVGNUM_2 |
	    DOVE_PMU_TDC0R_THERMREFCALCOUNT(0xf1) |
	    DOVE_PMU_TDC0R_THERMSELVCAL(2));
	WRITE_PMUREG(sc, DOVE_PMU_TDC0R, tdc0);
	WRITE_PMUREG(sc, DOVE_PMU_TDC0R,
	    READ_PMUREG(sc, DOVE_PMU_TDC0R) | DOVE_PMU_TDC0R_THERMSOFTRESET);
	delay(1);
	WRITE_PMUREG(sc, DOVE_PMU_TDC0R,
	    READ_PMUREG(sc, DOVE_PMU_TDC0R) & ~DOVE_PMU_TDC0R_THERMSOFTRESET);
	cpucdc0 = READ_PMUREG(sc, DOVE_PMU_CPUCDC0R);
	sc->sc_xpratio = DOVE_PMU_CPUCDC0R_XPRATIO(cpucdc0);
	sc->sc_dpratio = DOVE_PMU_CPUCDC0R_DPRATIO(cpucdc0);

	sc->sc_mvsocpmu_sc.sc_iot = mva->mva_iot;

	if (bus_space_subregion(sc->sc_iot, sc->sc_pmch,
	    DOVE_PMU_TM_BASE, MVSOC_PMU_TM_SIZE, &sc->sc_mvsocpmu_sc.sc_tmh))
		panic("%s: Cannot map thermal manager registers",
		    device_xname(self));
	sc->sc_mvsocpmu_sc.sc_uc2val = dove_tm_uc2val;
	sc->sc_mvsocpmu_sc.sc_val2uc = dove_tm_val2uc;

	mvsocpmu_attach(parent, self, aux);

	WRITE_PMUREG(sc, DOVE_PMU_PMUICR, 0);
	WRITE_PMUREG(sc, DOVE_PMU_PMUIMR, DOVE_PMU_PMUI_THERMOVERHEAT);

	marvell_intr_establish(mva->mva_irq, IPL_HIGH, dove_pmu_intr, sc);
}

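/*
 * PMU interrupt handler: acknowledge the cause register, then react to the
 * enabled causes.  On an overheat event the CPU is switched to slow mode and
 * the cooling interrupt is unmasked; once the cooling event fires, the CPU
 * is switched back to turbo mode.  The remaining causes are only logged.
 */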
static int
dove_pmu_intr(void *arg)
{
	struct dove_pmu_softc *sc = arg;
	uint32_t cause, mask;

	mask = READ_PMUREG(sc, DOVE_PMU_PMUIMR);
	cause = READ_PMUREG(sc, DOVE_PMU_PMUICR);
printf("dove pmu intr: cause 0x%x, mask 0x%x\n", cause, mask);
	WRITE_PMUREG(sc, DOVE_PMU_PMUICR, 0);
	cause &= mask;

	if (cause & DOVE_PMU_PMUI_BATTFAULT) {
printf("  Battery Fault\n");
	}
	if (cause & DOVE_PMU_PMUI_RTCALARM) {
printf("  RTC Alarm\n");
	}
	if (cause & DOVE_PMU_PMUI_THERMOVERHEAT) {
		mask |= DOVE_PMU_PMUI_THERMCOOLING;
		if (dove_dfs_slow(sc, true) == 0)
			mask &= ~DOVE_PMU_PMUI_THERMOVERHEAT;
		WRITE_PMUREG(sc, DOVE_PMU_PMUIMR, mask);
	}
	if (cause & DOVE_PMU_PMUI_THERMCOOLING) {
		mask |= DOVE_PMU_PMUI_THERMOVERHEAT;
		if (dove_dfs_slow(sc, false) == 0)
			mask &= ~DOVE_PMU_PMUI_THERMCOOLING;
		WRITE_PMUREG(sc, DOVE_PMU_PMUIMR, mask);
	}
	if (cause & DOVE_PMU_PMUI_DVSDONE) {
printf("  DVS Done\n");
	}
	if (cause & DOVE_PMU_PMUI_DFSDONE) {
printf("  DFS Done\n");
	}

	return 0;
}

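/*
 * Conversion helpers for the thermal manager (hooked up as sc_uc2val and
 * sc_val2uc above): they translate between a temperature in microcelsius
 * and the raw thermal diode reading, using the linear relation
 * uC = (2281638 - 7298 * val) / 10 * 1000 and its inverse.
 */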
static int
dove_tm_uc2val(int v)
{

	return (2281638 - v / 1000 * 10) / 7298;
}

static int
dove_tm_val2uc(int v)
{

	return (2281638 - 7298 * v) / 10 * 1000;
}

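/*
 * dove_dfs_slow:
 *
 *	Switch the CPU between slow and turbo mode using the PMU DFS engine:
 *	program the target speed mode, unmask the <DFSDone> interrupt, mask
 *	IRQ/FIQ at the PMU, start the DFS and execute WFI; the PMU wakes the
 *	CPU once the frequency change has completed.  Returns 0 on success
 *	(or if the CPU is already in the requested mode), 1 on failure.
 */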
static int
dove_dfs_slow(struct dove_pmu_softc *sc, bool slow)
{
	uint32_t control, status, psw, pmucr;
	int rv;
	uint32_t cause0, cause1, cause2;

	status = READ_PMUREG(sc, DOVE_PMU_CPUSDFSSR);
	status &= DOVE_PMU_CPUSDFSSR_CPUSLOWMODESTTS_MASK;
	if ((slow && status == DOVE_PMU_CPUSDFSSR_CPUSLOWMODESTTS_SLOW) ||
	    (!slow && status == DOVE_PMU_CPUSDFSSR_CPUSLOWMODESTTS_TURBO))
		return 0;

	cause0 = READ_PMUREG(sc, DOVE_PMU_PMUICR);
	/*
	 * 1. Disable the CPU FIQ and IRQ interrupts.
	 */
	psw = disable_interrupts(I32_bit | F32_bit);

	/*
	 * 2. Program the new CPU Speed mode in the CPU Subsystem DFS Control
	 *    Register.
	 */
	control = READ_PMUREG(sc, DOVE_PMU_CPUSDFSCR);
	if (slow) {
		control |= DOVE_PMU_CPUSDFSCR_CPUSLOWEN;
		control |= DOVE_PMU_CPUSDFSCR_CPUL2CR(sc->sc_dpratio);
	} else {
		control &= ~DOVE_PMU_CPUSDFSCR_CPUSLOWEN;
		control |= DOVE_PMU_CPUSDFSCR_CPUL2CR(sc->sc_xpratio);
	}
	WRITE_PMUREG(sc, DOVE_PMU_CPUSDFSCR, control);

	/*
	 * 3. Enable the <DFSDone> field in the PMU Interrupts Mask Register
	 *    to wake up the CPU when the DFS procedure has been completed.
	 */
	WRITE_PMUREG(sc, DOVE_PMU_PMUIMR,
	    READ_PMUREG(sc, DOVE_PMU_PMUIMR) | DOVE_PMU_PMUI_DFSDONE);

	/*
	 * 4. Set the <MaskFIQ> and <MaskIRQ> fields in the PMU Control
	 *    Register.  The PMU masks the main interrupt pins of the
	 *    Interrupt Controller (FIQ and IRQ), so that they cannot be
	 *    asserted to the CPU core.
	 */
	pmucr = bus_space_read_4(sc->sc_iot, sc->sc_pmh, DOVE_PMU_PMUCR);
	cause1 = READ_PMUREG(sc, DOVE_PMU_PMUICR);
	bus_space_write_4(sc->sc_iot, sc->sc_pmh, DOVE_PMU_PMUCR,
	    pmucr | DOVE_PMU_PMUCR_MASKFIQ | DOVE_PMU_PMUCR_MASKIRQ);

	/*
	 * 5. Set the <DFSEn> field in the CPU Subsystem DFS Control Register.
	 */
	WRITE_PMUREG(sc, DOVE_PMU_CPUSDFSCR,
	    READ_PMUREG(sc, DOVE_PMU_CPUSDFSCR) | DOVE_PMU_CPUSDFSCR_DFSEN);

	/*
	 * 6. Use the WFI instruction (Wait for Interrupt) to place the CPU
	 *    in Sleep mode.
	 */
	cause2 = READ_PMUREG(sc, DOVE_PMU_PMUICR);
	__asm("wfi");

	status = READ_PMUREG(sc, DOVE_PMU_CPUSDFSSR);
	status &= DOVE_PMU_CPUSDFSSR_CPUSLOWMODESTTS_MASK;
	if ((slow && status == DOVE_PMU_CPUSDFSSR_CPUSLOWMODESTTS_SLOW) ||
	    (!slow && status == DOVE_PMU_CPUSDFSSR_CPUSLOWMODESTTS_TURBO)) {
		rv = 0;
		printf("DFS changed to %s\n", slow ? "slow" : "turbo");
	} else {
		rv = 1;
		printf("DFS failed to %s\n", slow ? "slow" : "turbo");
	}

	bus_space_write_4(sc->sc_iot, sc->sc_pmh, DOVE_PMU_PMUCR, pmucr);
	restore_interrupts(psw);
printf("causes: 0x%x -> 0x%x -> 0x%x\n", cause0, cause1, cause2);

	return rv;
}
#endif
563