/*	$NetBSD: timer.c,v 1.34 2018/01/12 09:47:44 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1996 Paul Kranenburg
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed by Paul Kranenburg.
 *	This product includes software developed by Harvard University.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)clock.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Kernel clocks provided by "timer" device.  The hardclock is provided by
 * the timer register (aka system counter).  The statclock is provided by
 * per CPU counter register(s) (aka processor counter(s)).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.34 2018/01/12 09:47:44 mrg Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/timetc.h>

#include <machine/autoconf.h>
#include <sys/bus.h>

#include <sparc/sparc/timerreg.h>
#include <sparc/sparc/timervar.h>

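/*
 * Interrupt handlers for the level-10 (hardclock) and level-14 (statclock)
 * timer interrupts; the machine-specific handler functions are filled in
 * by timerattach() below.
 */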
static struct intrhand level10;
static struct intrhand level14;

static u_int timer_get_timecount(struct timecounter *);

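/* Cookie for the schedintr (schedclock) soft interrupt established in timerattach(). */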
void *sched_cookie;

/*
 * timecounter local state
 */
static struct counter {
	__cpu_simple_lock_t lock;	/* protects access to offset, reg, last* */
	volatile u_int *cntreg;		/* counter register to read */
	u_int limit;			/* limit we count up to */
	u_int offset;			/* accumulated offset due to wraps */
	u_int shift;			/* scaling for valid bits */
	u_int mask;			/* valid bit mask */
	u_int lastcnt;			/* the last* values are used to notice */
	u_int lastres;			/* and fix up cases where it would appear */
	u_int lastoffset;		/* time went backwards. */
} cntr __aligned(CACHE_LINE_SIZE);
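/*
 * The value handed to the timecounter framework is essentially
 * (count >> shift) + offset: the hardware count scaled down to its valid
 * bits, plus an accumulated offset that grows by 'limit' each time the
 * counter wraps (see tickle_tc() below).
 */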

/*
 * define timecounter
 */

static struct timecounter counter_timecounter = {
	.tc_get_timecount =	timer_get_timecount,
	.tc_poll_pps =		NULL,
	.tc_counter_mask =	~0u,
	.tc_frequency =		0,
	.tc_name =		"timer-counter",
	.tc_quality =		100,
	.tc_priv =		&cntr,
};

/*
 * timer_get_timecount provides the current counter value.
 */
__attribute__((__optimize__("Os")))
static u_int
timer_get_timecount(struct timecounter *tc)
{
	u_int cnt, res, fixup, offset;
	int s;

	/*
	 * We use splhigh/__cpu_simple_lock here as we don't want
	 * any mutex or lockdebug overhead.  The lock protects a
	 * bunch of the members of cntr that are written here to
	 * deal with the various minor races to be observed and
	 * worked around.
	 */
	s = splhigh();
	__cpu_simple_lock(&cntr.lock);
	res = cnt = *cntr.cntreg;

	res &= ~TMR_LIMIT;
	offset = cntr.offset;

	/*
	 * There are 3 cases here:
	 * - limit reached, interrupt not yet processed.
	 * - count reset but offset the same, race between handling
	 *   the interrupt and tickle_tc() updating the offset.
	 * - normal case.
	 *
	 * For the first two cases, add the limit so that we avoid
	 * time going backwards.
	 */
	if (cnt != res) {
		fixup = cntr.limit;
	} else if (res < cntr.lastcnt && offset == cntr.lastoffset) {
		fixup = cntr.limit;
	} else {
		fixup = 0;
	}

	cntr.lastcnt = res;
	cntr.lastoffset = offset;

	res >>= cntr.shift;
	res &= cntr.mask;

	res += fixup + offset;

	/*
	 * This handles early-boot cases where the counter resets twice
	 * before the offset is updated, and we have a stupid check to
	 * ensure overflow hasn't happened.
	 */
	if (res < cntr.lastres && res > (TMR_MASK+1) << 3)
		res = cntr.lastres + 1;

	cntr.lastres = res;

	__cpu_simple_unlock(&cntr.lock);
	splx(s);

	return res;
}

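/*
 * Account for a timer wrap: add another 'limit' to the accumulated
 * timecounter offset.  This runs from the IPL10 clock interrupt path
 * (see the comment below), keeping the timecounter monotonic across
 * counter wraps.
 */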
void
tickle_tc(void)
{

	if (timecounter->tc_get_timecount == timer_get_timecount) {
		/*
		 * This could be protected by cntr.lock/splhigh but the update
		 * happens at IPL10 already and as a 32 bit value it should
		 * never be seen as a partial update, so skip it here.  This
		 * also probably slows down the actual offset update, making
		 * one of the cases above more likely to need the workaround.
		 */
		cntr.offset += cntr.limit;
	}
}

/*
 * sun4/sun4c/sun4m common timer attach code
 */
void
timerattach(volatile int *cntreg, volatile int *limreg)
{
	u_int prec = 0, t0;

	/*
	 * Calibrate delay() by tweaking the magic constant
	 * until a delay(100) actually reads (at least) 100 us on the clock.
	 * Note: sun4m clocks tick with 500ns periods.
	 */
	for (timerblurb = 1; ; timerblurb++) {
		int discard;
		u_int t1;

		/* Reset counter register by writing some large limit value */
		discard = *limreg;
		__USE(discard);
		*limreg = tmr_ustolim(TMR_MASK-1);

		t0 = *cntreg;
		delay(100);
		t1 = *cntreg;

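		/*
		 * Accumulate the bits seen to change across reads; the
		 * lowest set bit in 'prec' gives the counter's usable
		 * resolution (it becomes cntr.shift below).
		 */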
		prec |= (t0 ^ t1) | (*cntreg ^ *cntreg);

		if (t1 & TMR_LIMIT)
			panic("delay calibration");

		t0 = (t0 >> TMR_SHIFT) & TMR_MASK;
		t1 = (t1 >> TMR_SHIFT) & TMR_MASK;

		if (t1 >= t0 + 100)
			break;
	}

	/* find lowest active bit */
	for (t0 = 0; t0 < TMR_SHIFT; t0++)
		if ((1 << t0) & prec)
			break;

	__cpu_simple_lock_init(&cntr.lock);

	cntr.shift = t0;
	cntr.mask = (1 << (31-t0))-1;
	counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1);

	printf(": delay constant %d, frequency = %" PRIu64 " Hz\n",
	       timerblurb, counter_timecounter.tc_frequency);

#if defined(SUN4) || defined(SUN4C)
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		timer_init = timer_init_4;
		level10.ih_fun = clockintr_4;
		level14.ih_fun = statintr_4;
		cntr.limit = tmr_ustolim(tick);
	}
#endif
#if defined(SUN4M)
	if (CPU_ISSUN4M) {
		timer_init = timer_init_4m;
		level10.ih_fun = clockintr_4m;
		level14.ih_fun = statintr_4m;
		cntr.limit = tmr_ustolim4m(tick);
	}
#endif

	/* link interrupt handlers */
	intr_establish(10, 0, &level10, NULL, true);
	intr_establish(14, 0, &level14, NULL, true);

	/* Establish a soft interrupt at a lower level for schedclock */
	sched_cookie = sparc_softintr_establish(IPL_SCHED, schedintr, NULL);
	if (sched_cookie == NULL)
		panic("timerattach: cannot establish schedintr");

	cntr.cntreg = cntreg;
	cntr.limit >>= cntr.shift;

	/* start at non-zero, so that cntr.lastoffset is less */
	cntr.offset = cntr.limit;

	tc_init(&counter_timecounter);
}

/*
 * Both sun4 and sun4m can attach a timer on obio.
 * The sun4m OPENPROM calls the timer the "counter".
 * The sun4 timer must be probed.
 */
static int
timermatch_obio(device_t parent, cfdata_t cf, void *aux)
{
#if defined(SUN4) || defined(SUN4M)
	union obio_attach_args *uoba = aux;
#endif
#if defined(SUN4)
	struct obio4_attach_args *oba;
#endif

#if defined(SUN4M)
	if (uoba->uoba_isobio4 == 0)
		return (strcmp("counter", uoba->uoba_sbus.sa_name) == 0);
#endif /* SUN4M */

	if (CPU_ISSUN4 == 0) {
		printf("timermatch_obio: attach args mixed up\n");
		return (0);
	}

#if defined(SUN4)
	/* Only these sun4s have "timer" (others have "oclock") */
	if (cpuinfo.cpu_type != CPUTYP_4_300 &&
	    cpuinfo.cpu_type != CPUTYP_4_400)
		return (0);

	/* Make sure there is something there */
	oba = &uoba->uoba_oba4;
	return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
				4,	/* probe size */
				0,	/* offset */
				0,	/* flags */
				NULL, NULL));
#endif /* SUN4 */
	panic("timermatch_obio: impossible");
}

static void
timerattach_obio(device_t parent, device_t self, void *aux)
{
	union obio_attach_args *uoba = aux;

	if (uoba->uoba_isobio4 == 0) {
#if defined(SUN4M)
		/* sun4m timer at obio */
		timerattach_obio_4m(parent, self, aux);
#endif /* SUN4M */
		return;
	}

	if (uoba->uoba_isobio4 != 0) {
#if defined(SUN4)
		/* sun4 timer at obio */
		timerattach_obio_4(parent, self, aux);
#endif /* SUN4 */
	}
}

CFATTACH_DECL_NEW(timer_obio, 0,
    timermatch_obio, timerattach_obio, NULL, NULL);

/*
 * Only sun4c attaches a timer at mainbus
 */
static int
timermatch_mainbus(device_t parent, cfdata_t cf, void *aux)
{
#if defined(SUN4C)
	struct mainbus_attach_args *ma = aux;

	return (strcmp("counter-timer", ma->ma_name) == 0);
#else
	return (0);
#endif
}

static void
timerattach_mainbus(device_t parent, device_t self, void *aux)
{

#if defined(SUN4C)
	timerattach_mainbus_4c(parent, self, aux);
#endif /* SUN4C */
}

CFATTACH_DECL_NEW(timer_mainbus, 0,
    timermatch_mainbus, timerattach_mainbus, NULL, NULL);