1 /* $NetBSD: timer_sun4m.c,v 1.33 2023/06/02 08:51:47 andvar Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 * Copyright (c) 1994 Gordon W. Ross
7 * Copyright (c) 1993 Adam Glass
8 * Copyright (c) 1996 Paul Kranenburg
9 * Copyright (c) 1996
10 * The President and Fellows of Harvard College. All rights reserved.
11 *
12 * This software was developed by the Computer Systems Engineering group
13 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
14 * contributed to Berkeley.
15 *
16 * All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Harvard University.
19 * This product includes software developed by the University of
20 * California, Lawrence Berkeley Laboratory.
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions
24 * are met:
25 *
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. All advertising materials mentioning features or use of this software
32 * must display the following acknowledgement:
33 * This product includes software developed by the University of
34 * California, Berkeley and its contributors.
35 * This product includes software developed by Paul Kranenburg.
36 * This product includes software developed by Harvard University.
37 * 4. Neither the name of the University nor the names of its contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 *
53 * @(#)clock.c 8.1 (Berkeley) 6/11/93
54 */
55
56 /*
57 * Sun4m timer support.
58 */
59
60 #include <sys/cdefs.h>
61 __KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.33 2023/06/02 08:51:47 andvar Exp $");
62
63 #include <sys/param.h>
64 #include <sys/kernel.h>
65 #include <sys/device.h>
66 #include <sys/systm.h>
67 #include <sys/cpu.h>
68
69 #include <machine/autoconf.h>
70 #include <sys/bus.h>
71
72 #include <sparc/sparc/vaddrs.h>
73 #include <sparc/sparc/cpuvar.h>
74 #include <sparc/sparc/timerreg.h>
75 #include <sparc/sparc/timervar.h>
76
/* System (level-10) counter/timer registers; mapped in timerattach_obio_4m(). */
static struct timer_4m *timerreg4m;
/* Each CPU's private (level-14) counter lives in its cpuinfo. */
#define counterreg4m cpuinfo.counterreg_4m

/*
 * SMP hardclock handler.
 * Soft-interrupt level used to cross-call hardclock() onto other CPUs.
 */
#define IPL_HARDCLOCK 10
84
85 /*
86 * Set up the real-time and statistics clocks.
87 * Leave stathz 0 only if no alternative timer is available.
88 *
89 * The frequencies of these clocks must be an even number of microseconds.
90 */
91 void
timer_init_4m(void)92 timer_init_4m(void)
93 {
94 struct cpu_info *cpi;
95 CPU_INFO_ITERATOR n;
96
97 timerreg4m->t_limit = tmr_ustolim4m(tick);
98 for (CPU_INFO_FOREACH(n, cpi)) {
99 cpi->counterreg_4m->t_limit = tmr_ustolim4m(statint);
100 }
101 icr_si_bic(SINTR_T);
102 }
103
#ifdef MULTIPROCESSOR
/*
 * IPI handler: run hardclock() on this CPU at splsched.
 */
static void
hardclock_ipi(void *cap)
{
	const int opil = splsched();

	hardclock((struct clockframe *)cap);
	splx(opil);
}
#endif
117
118 /*
119 * Call hardclock on all CPUs.
120 */
121 static void
handle_hardclock(struct clockframe * cap)122 handle_hardclock(struct clockframe *cap)
123 {
124 int s;
125 #ifdef MULTIPROCESSOR
126 struct cpu_info *cpi;
127 CPU_INFO_ITERATOR n;
128
129 for (CPU_INFO_FOREACH(n, cpi)) {
130 if (cpi == cpuinfo.ci_self) {
131 KASSERT(CPU_IS_PRIMARY(cpi));
132 continue;
133 }
134
135 raise_ipi(cpi, IPL_HARDCLOCK);
136 }
137 #endif
138 s = splsched();
139 hardclock(cap);
140 splx(s);
141 }
142
143 /*
144 * Level 10 (clock) interrupts from system counter.
145 */
146 int
clockintr_4m(void * cap)147 clockintr_4m(void *cap)
148 {
149
150 /*
151 * XXX this needs to be fixed in a more general way
152 * problem is that the kernel enables interrupts and THEN
153 * sets up clocks. In between there's an opportunity to catch
154 * a timer interrupt - if we call hardclock() at that point we'll
155 * panic
156 * so for now just bail when cold
157 *
158 * For MP, we defer calling hardclock() to the schedintr so
159 * that we call it on all cpus.
160 */
161 if (cold)
162 return 0;
163
164 kpreempt_disable();
165
166 /* Read the limit register to clear the interrupt. */
167 *((volatile int *)&timerreg4m->t_limit);
168
169 /* Update the timecounter offset. */
170 tickle_tc();
171
172 /*
173 * We don't have a system-clock per-cpu, and we'd like to keep
174 * the per-cpu timer for the statclock, so, send an IPI to
175 * everyone to call hardclock.
176 */
177 handle_hardclock(cap);
178
179 kpreempt_enable();
180 return (1);
181 }
182
183 /*
184 * Level 14 (stat clock) interrupts from processor counter.
185 */
186 int
statintr_4m(void * cap)187 statintr_4m(void *cap)
188 {
189 struct clockframe *frame = cap;
190 u_long newint;
191
192 kpreempt_disable();
193
194 /* read the limit register to clear the interrupt */
195 *((volatile int *)&counterreg4m->t_limit);
196
197 statclock(frame);
198
199 /*
200 * Compute new randomized interval.
201 */
202 newint = new_interval();
203
204 /*
205 * Use the `non-resetting' limit register, so we don't
206 * lose the counter ticks that happened since this
207 * interrupt was raised.
208 */
209 counterreg4m->t_limit_nr = tmr_ustolim4m(newint);
210
211 /*
212 * The factor 8 is only valid for stathz==100.
213 * See also clock.c
214 */
215 if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0) {
216 if (CLKF_LOPRI(frame, IPL_SCHED)) {
217 /* No need to schedule a soft interrupt */
218 spllowerschedclock();
219 schedintr(cap);
220 } else {
221 /*
222 * We're interrupting a thread that may have the
223 * scheduler lock; run schedintr() on this CPU later.
224 */
225 raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */
226 }
227 }
228 kpreempt_enable();
229
230 return (1);
231 }
232
/*
 * Autoconf attach: map the sun4m timer registers found on obio.
 *
 * The last register set is the system (level-10) timer; each of the
 * preceding sets is a per-CPU (level-14) counter.  After mapping, the
 * common timerattach() is called with the system counter/limit pair.
 */
void
timerattach_obio_4m(device_t parent, device_t self, void *aux)
{
	union obio_attach_args *uoba = aux;
	struct sbus_attach_args *sa = &uoba->uoba_sbus;
	struct cpu_info *cpi;
	bus_space_handle_t bh;
	int i;
	CPU_INFO_ITERATOR n;

	/* Need at least one per-CPU counter plus the system timer. */
	if (sa->sa_nreg < 2) {
		printf(": only %d register sets\n", sa->sa_nreg);
		return;
	}

	/* Map the system timer (the last register set) at the fixed VA. */
	i = sa->sa_nreg - 1;
	if (bus_space_map2(sa->sa_bustag,
			 BUS_ADDR(sa->sa_reg[i].oa_space,
				  sa->sa_reg[i].oa_base),
			 sizeof(struct timer_4m),
			 BUS_SPACE_MAP_LINEAR,
			 TIMERREG_VA, &bh) != 0) {
		printf(": can't map registers\n");
		return;
	}
	timerreg4m = (struct timer_4m *)TIMERREG_VA;

	/* Map each CPU's counter */
	for (i = 0; i < sa->sa_nreg - 1; i++) {
		/*
		 * Check whether the CPU corresponding to this timer
		 * register is installed.
		 * (Register set i appears to belong to the CPU with
		 * MID i + 8; uniprocessor machines take set 0 — TODO
		 * confirm against the sun4m MID numbering.)
		 */
		for (CPU_INFO_FOREACH(n, cpi)) {
			if ((i == 0 && sparc_ncpus == 1) || cpi->mid == i + 8) {
				/* We got a corresponding MID. */
				break;
			}
			cpi = NULL;	/* no match yet; NULL if loop runs out */
		}
		if (cpi == NULL)
			continue;	/* no CPU installed for this set */

		if (sbus_bus_map(sa->sa_bustag,
				 sa->sa_reg[i].oa_space,
				 sa->sa_reg[i].oa_base,
				 sizeof(struct timer_4m),
				 BUS_SPACE_MAP_LINEAR,
				 &bh) != 0) {
			printf(": can't map CPU counter %d\n", i);
			return;
		}
		cpi->counterreg_4m = (struct counter_4m *)bh;
	}

#if defined(MULTIPROCESSOR)
	if (sparc_ncpus > 1) {
		/*
		 * Note that we don't actually use this cookie after checking
		 * it was established, we call directly via raise_ipi() on
		 * IPL_HARDCLOCK.
		 */
		void *hardclock_cookie;

		hardclock_cookie = sparc_softintr_establish(IPL_HARDCLOCK,
		    hardclock_ipi, NULL);
		if (hardclock_cookie == NULL)
			panic("timerattach: cannot establish hardclock_intr");
	}
#endif

	/* Put processor counter in "timer" mode */
	timerreg4m->t_cfg = 0;

	/* Hand the system counter/limit registers to the common code. */
	timerattach(&timerreg4m->t_counter, &timerreg4m->t_limit);
}
310