xref: /netbsd-src/sys/arch/arm/cortex/a9ptmr.c (revision 53d1339bf7f9c7367b35a9e1ebe693f9b047a47b)
1 /*	$NetBSD: a9ptmr.c,v 1.2 2019/08/14 09:20:00 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nick Hudson
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: a9ptmr.c,v 1.2 2019/08/14 09:20:00 skrll Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/cpu.h>
38 #include <sys/device.h>
39 #include <sys/kernel.h>
40 
41 #include <prop/proplib.h>
42 
43 #include <arm/cortex/a9tmr_reg.h>
44 #include <arm/cortex/a9ptmr_var.h>
45 
46 #include <arm/cortex/mpcore_var.h>
47 
/* Singleton softc; the MD hooks below (a9ptmr_delay() etc.) use it. */
static struct a9ptmr_softc *a9ptmr_sc;

static int a9ptmr_match(device_t, cfdata_t, void *);
static void a9ptmr_attach(device_t, device_t, void *);

/*
 * Software state for the Cortex-A5/A9 MPCore private timer.
 */
struct a9ptmr_softc {
	device_t sc_dev;
	bus_space_tag_t sc_memt;	/* bus space tag for timer registers */
	bus_space_handle_t sc_memh;	/* handle covering the timer registers */

	uint32_t sc_ctl;		/* TMR_CTL value programmed at init */
	uint32_t sc_freq;		/* timer input frequency (Hz) */
	uint32_t sc_load;		/* auto-reload count for one tick */

	uint32_t sc_prescaler;		/* prescaler divisor (currently 1) */
};


CFATTACH_DECL_NEW(arma9ptmr, sizeof(struct a9ptmr_softc),
    a9ptmr_match, a9ptmr_attach, NULL, NULL);

/* At most one instance of this driver may attach. */
static bool attached;
70 
71 static inline uint32_t
72 a9ptmr_read(struct a9ptmr_softc *sc, bus_size_t o)
73 {
74 	return bus_space_read_4(sc->sc_memt, sc->sc_memh, o);
75 }
76 
77 static inline void
78 a9ptmr_write(struct a9ptmr_softc *sc, bus_size_t o, uint32_t v)
79 {
80 	bus_space_write_4(sc->sc_memt, sc->sc_memh, o, v);
81 }
82 
83 /* ARGSUSED */
84 static int
85 a9ptmr_match(device_t parent, cfdata_t cf, void *aux)
86 {
87 	struct mpcore_attach_args * const mpcaa = aux;
88 
89 	if (attached)
90 		return 0;
91 
92 	if (!CPU_ID_CORTEX_A9_P(curcpu()->ci_arm_cpuid) &&
93 	    !CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid))
94 		return 0;
95 
96 	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
97 		return 0;
98 
99 #if 0
100 	/*
101 	 * This isn't present on UP A9s (since CBAR isn't present).
102 	 */
103 	uint32_t mpidr = armreg_mpidr_read();
104 	if (mpidr == 0 || (mpidr & MPIDR_U))
105 		return 0;
106 #endif
107 
108 	return 1;
109 }
110 
111 
112 static void
113 a9ptmr_attach(device_t parent, device_t self, void *aux)
114 {
115 	struct a9ptmr_softc * const sc = device_private(self);
116 	struct mpcore_attach_args * const mpcaa = aux;
117 	prop_dictionary_t dict = device_properties(self);
118 	char freqbuf[sizeof("XXX SHz")];
119 	const char *cpu_type;
120 
121 
122 	sc->sc_dev = self;
123 	sc->sc_memt = mpcaa->mpcaa_memt;
124 
125 	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh,
126 	    mpcaa->mpcaa_off1, TMR_PRIVATE_SIZE, &sc->sc_memh);
127 
128 	/*
129 	 * This runs at the ARM PERIPHCLOCK.
130 	 * The MD code should have setup our frequency for us.
131 	 */
132 	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq)) {
133 		dict = device_properties(parent);
134 		prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq);
135 	}
136 
137 	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);
138 
139 	a9ptmr_sc = sc;
140 	sc->sc_dev = self;
141 	sc->sc_memt = mpcaa->mpcaa_memt;
142 	sc->sc_memh = mpcaa->mpcaa_memh;
143 
144 	sc->sc_ctl = a9ptmr_read(sc, TMR_CTL);
145 
146 	sc->sc_prescaler = 1;
147 #if 0
148 	/*
149 	 * Let's hope the timer frequency isn't prime.
150 	 */
151 	for (size_t div = 256; div >= 2; div--) {
152 		if (sc->sc_freq % div == 0) {
153 			sc->sc_prescaler = div;
154 			break;
155 		}
156 	}
157 	sc->sc_freq /= sc->sc_prescaler;
158 #endif
159 
160 	aprint_debug(": freq %d prescaler %d", sc->sc_freq,
161 	    sc->sc_prescaler);
162 	sc->sc_ctl = TMR_CTL_INT_ENABLE | TMR_CTL_AUTO_RELOAD | TMR_CTL_ENABLE;
163 	sc->sc_ctl |= __SHIFTIN(sc->sc_prescaler - 1, TMR_CTL_PRESCALER);
164 
165 	sc->sc_load = (sc->sc_freq / hz) - 1;
166 
167 	aprint_debug(": load %d ", sc->sc_load);
168 
169 	a9ptmr_init_cpu_clock(curcpu());
170 
171 	aprint_naive("\n");
172 	if (CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid)) {
173 		cpu_type = "A5";
174 	} else {
175 		cpu_type = "A9";
176 	}
177 	aprint_normal(": %s Private Timer (%s)\n", cpu_type, freqbuf);
178 
179 	attached = true;
180 }
181 
182 
183 
184 void
185 a9ptmr_delay(unsigned int n)
186 {
187 	struct a9ptmr_softc * const sc = a9ptmr_sc;
188 
189 	KASSERT(sc != NULL);
190 
191 	uint32_t freq = sc->sc_freq ? sc->sc_freq :
192 	    curcpu()->ci_data.cpu_cc_freq / 2;
193 	KASSERT(freq != 0);
194 
195 	const uint64_t counts_per_usec = freq / 1000000;
196 	uint32_t delta, usecs, last, curr;
197 
198 	KASSERT(sc != NULL);
199 
200 	last = a9ptmr_read(sc, TMR_CTR);
201 
202 	delta = usecs = 0;
203 	while (n > usecs) {
204 		curr = a9ptmr_read(sc, TMR_CTR);
205 
206 		/* Check to see if the timer has reloaded. */
207 		if (curr > last)
208 			delta += (sc->sc_load - curr) + last;
209 		else
210 			delta += last - curr;
211 
212 		last = curr;
213 
214 		if (delta >= counts_per_usec) {
215 			usecs += delta / counts_per_usec;
216 			delta %= counts_per_usec;
217 		}
218 	}
219 }
220 
221 
222 void
223 a9ptmr_cpu_initclocks(void)
224 {
225 	struct a9ptmr_softc * const sc __diagused = a9ptmr_sc;
226 
227 	KASSERT(sc->sc_dev != NULL);
228 	KASSERT(sc->sc_freq != 0);
229 
230 }
231 
/*
 * (Re)program the private timer for clock interrupts: stop the timer,
 * acknowledge any stale event, then load the reload value and restart
 * it with the control word computed at attach time.  The write order
 * is deliberate: disable/ack before reprogramming.
 * NOTE(review): 'ci' is currently unused here -- presumably the timer
 * registers accessed are those of the calling CPU; confirm.
 */
void
a9ptmr_init_cpu_clock(struct cpu_info *ci)
{
	struct a9ptmr_softc * const sc = a9ptmr_sc;

	/* Disable Private timer and acknowledge any event */
	a9ptmr_write(sc, TMR_CTL, 0);
	a9ptmr_write(sc, TMR_INT, TMR_INT_EVENT);

	/*
	 * Provide the auto load value for the decrementing counter and
	 * start it.
	 */
	a9ptmr_write(sc, TMR_LOAD, sc->sc_load);
	a9ptmr_write(sc, TMR_CTL, sc->sc_ctl);

}
249 
250 
251 
252 /*
253  * a9ptmr_intr:
254  *
255  *	Handle the hardclock interrupt.
256  */
257 int
258 a9ptmr_intr(void *arg)
259 {
260 	struct clockframe * const cf = arg;
261 	struct a9ptmr_softc * const sc = a9ptmr_sc;
262 
263 	a9ptmr_write(sc, TMR_INT, TMR_INT_EVENT);
264 	hardclock(cf);
265 
266 	return 1;
267 }
268