xref: /netbsd-src/sys/arch/sparc/sparc/cpu.c (revision 4ad7371d80aadfc057b82f145debea893be4dddf)
1 /*	$NetBSD: cpu.c,v 1.260 2021/05/29 02:58:37 mrg Exp $ */
2 
3 /*
4  * Copyright (c) 1996
5  *	The President and Fellows of Harvard College. All rights reserved.
6  * Copyright (c) 1992, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  *
9  * This software was developed by the Computer Systems Engineering group
10  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11  * contributed to Berkeley.
12  *
13  * All advertising materials mentioning features or use of this software
14  * must display the following acknowledgement:
15  *	This product includes software developed by Harvard University.
16  *	This product includes software developed by the University of
17  *	California, Lawrence Berkeley Laboratory.
18  *
19  * Redistribution and use in source and binary forms, with or without
20  * modification, are permitted provided that the following conditions
21  * are met:
22  *
23  * 1. Redistributions of source code must retain the above copyright
24  *    notice, this list of conditions and the following disclaimer.
25  * 2. Redistributions in binary form must reproduce the above copyright
26  *    notice, this list of conditions and the following disclaimer in the
27  *    documentation and/or other materials provided with the distribution.
28  * 3. All advertising materials mentioning features or use of this software
29  *    must display the following acknowledgement:
30  *	This product includes software developed by Aaron Brown and
31  *	Harvard University.
32  *	This product includes software developed by the University of
33  *	California, Berkeley and its contributors.
34  * 4. Neither the name of the University nor the names of its contributors
35  *    may be used to endorse or promote products derived from this software
36  *    without specific prior written permission.
37  *
38  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48  * SUCH DAMAGE.
49  *
50  *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
51  *
52  */
53 
54 #include <sys/cdefs.h>
55 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.260 2021/05/29 02:58:37 mrg Exp $");
56 
57 #include "opt_multiprocessor.h"
58 #include "opt_lockdebug.h"
59 #include "opt_ddb.h"
60 #include "opt_sparc_arch.h"
61 
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/device.h>
65 #include <sys/malloc.h>
66 #include <sys/kernel.h>
67 #include <sys/evcnt.h>
68 #include <sys/xcall.h>
69 #include <sys/ipi.h>
70 #include <sys/cpu.h>
71 #include <sys/reboot.h>
72 #include <sys/sysctl.h>
73 #include <sys/kmem.h>
74 
75 #include <uvm/uvm.h>
76 
77 #include <machine/promlib.h>
78 #include <machine/autoconf.h>
79 #include <machine/cpu.h>
80 #include <machine/reg.h>
81 #include <machine/ctlreg.h>
82 #include <machine/trap.h>
83 #include <machine/pcb.h>
84 #include <machine/pmap.h>
85 #include <machine/locore.h>
86 
87 #if defined(MULTIPROCESSOR) && defined(DDB)
88 #include <ddb/db_output.h>
89 #include <machine/db_machdep.h>
90 #endif
91 
92 #include <sparc/sparc/cache.h>
93 #include <sparc/sparc/asm.h>
94 #include <sparc/sparc/cpuvar.h>
95 #include <sparc/sparc/memreg.h>
96 #include <sparc/sparc/cache_print.h>
97 #if defined(SUN4D)
98 #include <sparc/sparc/cpuunitvar.h>
99 #endif
100 
101 #ifdef DEBUG
102 #ifndef DEBUG_XCALL
103 #define DEBUG_XCALL 0
104 #endif
105 int	debug_xcall = DEBUG_XCALL;
106 #else
107 #define debug_xcall 0
108 #endif
109 
110 struct cpu_softc {
111 	device_t sc_dev;
112 	struct cpu_info	*sc_cpuinfo;
113 };
114 
115 /* The following are used externally (sysctl_hw). */
116 char	machine[] = MACHINE;		/* from <machine/param.h> */
117 char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
118 int	cpu_arch;			/* sparc architecture version */
119 
120 int	sparc_ncpus;			/* # of CPUs detected by PROM */
121 struct cpu_info *cpus[_MAXNCPU+1];	/* we only support 4 CPUs. */
122 
123 /* The CPU configuration driver. */
124 static void cpu_mainbus_attach(device_t, device_t, void *);
125 int  cpu_mainbus_match(device_t, cfdata_t, void *);
126 
127 CFATTACH_DECL_NEW(cpu_mainbus, sizeof(struct cpu_softc),
128     cpu_mainbus_match, cpu_mainbus_attach, NULL, NULL);
129 
130 #if defined(SUN4D)
131 static int cpu_cpuunit_match(device_t, cfdata_t, void *);
132 static void cpu_cpuunit_attach(device_t, device_t, void *);
133 
134 CFATTACH_DECL_NEW(cpu_cpuunit, sizeof(struct cpu_softc),
135     cpu_cpuunit_match, cpu_cpuunit_attach, NULL, NULL);
136 #endif /* SUN4D */
137 
138 static void cpu_setup_sysctl(struct cpu_softc *);
139 static void cpu_init_evcnt(struct cpu_info *);
140 static void cpu_attach(struct cpu_softc *, int, int);
141 
142 static const char *fsrtoname(int, int, int);
143 static void cache_print(struct cpu_softc *);
144 void cpu_setup(void);
145 void fpu_init(struct cpu_info *);
146 
147 #define	IU_IMPL(psr)	((u_int)(psr) >> 28)
148 #define	IU_VERS(psr)	(((psr) >> 24) & 0xf)
149 
150 #define SRMMU_IMPL(mmusr)	((u_int)(mmusr) >> 28)
151 #define SRMMU_VERS(mmusr)	(((mmusr) >> 24) & 0xf)
152 
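/*
 * Illustrative sketch (not part of this file): the macros above pull the
 * implementation field out of bits 31-28 and the version field out of
 * bits 27-24 of the %psr (and, analogously, of the SRMMU control
 * register).  The value below is made up for demonstration.
 */
#if 0
static void
psr_decode_example(void)
{
	u_int psr = 0x41000000;		/* hypothetical %psr value */

	printf("IU impl %u, vers %u\n", IU_IMPL(psr), IU_VERS(psr));
	/* prints "IU impl 4, vers 1" */
}
#endif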
153 int bootmid;		/* Module ID of boot CPU */
154 
155 #ifdef notdef
156 /*
157  * IU implementations are parceled out to vendors (with some slight
158  * glitches).  Printing these is cute but takes too much space.
159  */
160 static char *iu_vendor[16] = {
161 	"Fujitsu",	/* and also LSI Logic */
162 	"ROSS",		/* ROSS (ex-Cypress) */
163 	"BIT",
164 	"LSIL",		/* LSI Logic finally got their own */
165 	"TI",		/* Texas Instruments */
166 	"Matsushita",
167 	"Philips",
168 	"Harvest",	/* Harvest VLSI Design Center */
169 	"SPEC",		/* Systems and Processes Engineering Corporation */
170 	"Weitek",
171 	"vendor#10",
172 	"vendor#11",
173 	"vendor#12",
174 	"vendor#13",
175 	"vendor#14",
176 	"vendor#15"
177 };
178 #endif
179 
180 #if defined(MULTIPROCESSOR)
181 u_int	cpu_ready_mask;			/* the set of CPUs marked as READY */
182 void cpu_spinup(struct cpu_info *);
183 static void cpu_attach_non_boot(struct cpu_softc *, struct cpu_info *, int);
184 
185 int go_smp_cpus = 0;	/* non-primary CPUs wait for this to go */
186 
187 /*
188  * This must be locked around all message transactions to ensure only
189  * one CPU is generating them.
190  */
191 kmutex_t xpmsg_mutex;
192 
193 #endif /* MULTIPROCESSOR */
194 
195 /*
196  * 4/110 comment: the 4/110 chops off the top 4 bits of an OBIO address.
197  *	this confuses autoconf.  for example, if you try and map
198  *	0xfe000000 in obio space on a 4/110 it actually maps 0x0e000000.
199  *	this is easy to verify with the PROM.   this causes problems
200  *	with devices like "esp0 at obio0 addr 0xfa000000" because the
201  *	4/110 treats it as "esp0 at obio0 addr 0x0a000000" which is the
202  *	address of the 4/110's "sw0" scsi chip.   the same thing happens
203  *	between zs1 and zs2.    since the sun4 line is "closed" and
204  *	we know all the "obio" devices that will ever be on it we just
205  *	put in some special case "if"'s in the match routines of esp,
206  *	dma, and zs.
207  */
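/*
 * A minimal sketch of the aliasing described above (illustrative, not
 * part of the driver): on a 4/110 only the low 28 bits of an OBIO
 * address are significant, so addresses differing only in the top
 * nibble refer to the same device.
 */
#if 0
static int
obio_4_110_aliases(uint32_t a, uint32_t b)
{

	/* e.g. 0xfa000000 vs 0x0a000000 -> true (esp0 lands on sw0) */
	return ((a & 0x0fffffff) == (b & 0x0fffffff));
}
#endif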
208 
209 int
210 cpu_mainbus_match(device_t parent, cfdata_t cf, void *aux)
211 {
212 	struct mainbus_attach_args *ma = aux;
213 
214 	return (strcmp(cf->cf_name, ma->ma_name) == 0);
215 }
216 
217 static void
218 cpu_mainbus_attach(device_t parent, device_t self, void *aux)
219 {
220 	struct mainbus_attach_args *ma = aux;
221 	struct { uint32_t va; uint32_t size; } *mbprop = NULL;
222 	struct openprom_addr *rrp = NULL;
223 	struct cpu_info *cpi;
224 	struct cpu_softc *sc;
225 	int mid, node;
226 	int error, n;
227 
228 	node = ma->ma_node;
229 	mid = (node != 0) ? prom_getpropint(node, "mid", 0) : 0;
230 	sc = device_private(self);
231 	sc->sc_dev = self;
232 	cpu_attach(sc, node, mid);
233 
234 	cpi = sc->sc_cpuinfo;
235 	if (cpi == NULL)
236 		return;
237 
238 	/*
239 	 * Map CPU mailbox if available
240 	 */
241 	if (node != 0 && (error = prom_getprop(node, "mailbox-virtual",
242 					sizeof(*mbprop),
243 					&n, &mbprop)) == 0) {
244 		cpi->mailbox = mbprop->va;
245 		free(mbprop, M_DEVBUF);
246 	} else if (node != 0 && (error = prom_getprop(node, "mailbox",
247 					sizeof(struct openprom_addr),
248 					&n, &rrp)) == 0) {
249 		/* XXX - map cached/uncached? If cached, deal with
250 		 *	 cache congruency!
251 		 */
252 		if (rrp[0].oa_space == 0)
253 			printf("%s: mailbox in mem space\n", device_xname(self));
254 
255 		if (bus_space_map(ma->ma_bustag,
256 				BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base),
257 				rrp[0].oa_size,
258 				BUS_SPACE_MAP_LINEAR,
259 				&cpi->mailbox) != 0)
260 			panic("%s: can't map CPU mailbox", device_xname(self));
261 		free(rrp, M_DEVBUF);
262 	}
263 
264 	/*
265 	 * Map Module Control Space if available
266 	 */
267 	if (cpi->mxcc == 0)
268 		/* We only know what it means on MXCCs */
269 		return;
270 
271 	rrp = NULL;
272 	if (node == 0 || (error = prom_getprop(node, "reg",
273 					sizeof(struct openprom_addr),
274 					&n, &rrp)) != 0)
275 		return;
276 
277 	/* register set #0 is the MBus port register */
278 	if (bus_space_map(ma->ma_bustag,
279 			BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base),
280 			rrp[0].oa_size,
281 			BUS_SPACE_MAP_LINEAR,
282 			&cpi->ci_mbusport) != 0) {
283 		panic("%s: can't map CPU regs", device_xname(self));
284 	}
285 	/* register set #1: MXCC control */
286 	if (bus_space_map(ma->ma_bustag,
287 			BUS_ADDR(rrp[1].oa_space, rrp[1].oa_base),
288 			rrp[1].oa_size,
289 			BUS_SPACE_MAP_LINEAR,
290 			&cpi->ci_mxccregs) != 0) {
291 		panic("%s: can't map CPU regs", device_xname(self));
292 	}
293 	/* register sets #3 and #4 are E$ cache data and tags */
294 
295 	free(rrp, M_DEVBUF);
296 }
297 
298 #if defined(SUN4D)
299 static int
300 cpu_cpuunit_match(device_t parent, cfdata_t cf, void *aux)
301 {
302 	struct cpuunit_attach_args *cpua = aux;
303 
304 	return (strcmp(cf->cf_name, cpua->cpua_type) == 0);
305 }
306 
307 static void
308 cpu_cpuunit_attach(device_t parent, device_t self, void *aux)
309 {
310 	struct cpuunit_attach_args *cpua = aux;
311 	struct cpu_softc *sc = device_private(self);
312 
313 	sc->sc_dev = self;
314 	cpu_attach(sc, cpua->cpua_node, cpua->cpua_device_id);
315 }
316 #endif /* SUN4D */
317 
318 static const char * const hard_intr_names[] = {
319 	"spur hard",
320 	"lev1 hard",
321 	"lev2 hard",
322 	"lev3 hard",
323 	"lev4 hard",
324 	"lev5 hard",
325 	"lev6 hard",
326 	"lev7 hard",
327 	"lev8 hard",
328 	"lev9 hard",
329 	"clock hard",
330 	"lev11 hard",
331 	"lev12 hard",
332 	"lev13 hard",
333 	"prof hard",
334 	"nmi hard",
335 };
336 
337 static const char * const soft_intr_names[] = {
338 	"spur soft",
339 	"lev1 soft",
340 	"lev2 soft",
341 	"lev3 soft",
342 	"lev4 soft",
343 	"lev5 soft",
344 	"lev6 soft",
345 	"lev7 soft",
346 	"lev8 soft",
347 	"lev9 soft",
348 	"lev10 soft",
349 	"lev11 soft",
350 	"lev12 soft",
351 	"xcall std",
352 	"xcall fast",
353 	"nmi soft",
354 };
355 
356 static void
357 cpu_init_evcnt(struct cpu_info *cpi)
358 {
359 	int i;
360 
361 	/*
362 	 * Setup the per-cpu counters.
363 	 *
364 	 * The "savefp null" counter should go away when the NULL
365 	 * struct fpstate * bug is fixed.
366 	 */
367 	evcnt_attach_dynamic(&cpi->ci_savefpstate, EVCNT_TYPE_INTR,
368 			     NULL, cpu_name(cpi), "savefp ipi");
369 	evcnt_attach_dynamic(&cpi->ci_savefpstate_null, EVCNT_TYPE_MISC,
370 			     NULL, cpu_name(cpi), "savefp null ipi");
371 	evcnt_attach_dynamic(&cpi->ci_xpmsg_mutex_fail, EVCNT_TYPE_MISC,
372 			     NULL, cpu_name(cpi), "IPI mutex_trylock fail");
373 	evcnt_attach_dynamic(&cpi->ci_xpmsg_mutex_fail_call, EVCNT_TYPE_MISC,
374 			     NULL, cpu_name(cpi), "IPI mutex_trylock fail/call");
375 	evcnt_attach_dynamic(&cpi->ci_xpmsg_mutex_not_held, EVCNT_TYPE_MISC,
376 			     NULL, cpu_name(cpi), "IPI with mutex not held");
377 	evcnt_attach_dynamic(&cpi->ci_xpmsg_bogus, EVCNT_TYPE_MISC,
378 			     NULL, cpu_name(cpi), "bogus IPI");
379 
380 	/*
381 	 * These are the per-cpu per-IPL hard & soft interrupt counters.
382 	 */
383 	for (i = 0; i < 16; i++) {
384 		evcnt_attach_dynamic(&cpi->ci_intrcnt[i], EVCNT_TYPE_INTR,
385 				     NULL, cpu_name(cpi), hard_intr_names[i]);
386 		evcnt_attach_dynamic(&cpi->ci_sintrcnt[i], EVCNT_TYPE_INTR,
387 				     NULL, cpu_name(cpi), soft_intr_names[i]);
388 	}
389 }
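/*
 * Note (assumption, not stated in this file): the event counters
 * attached above are the ones reported per-CPU by userland tools that
 * read the evcnt(9) list, e.g. "vmstat -e".
 */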
390 
391 /* setup the hw.cpuN.* nodes for this cpu */
392 static void
393 cpu_setup_sysctl(struct cpu_softc *sc)
394 {
395 	struct cpu_info	*ci = sc->sc_cpuinfo;
396 	const struct sysctlnode *cpunode = NULL;
397 
398 	sysctl_createv(NULL, 0, NULL, &cpunode,
399 		       CTLFLAG_PERMANENT,
400 		       CTLTYPE_NODE, device_xname(sc->sc_dev), NULL,
401 		       NULL, 0, NULL, 0,
402 		       CTL_HW,
403 		       CTL_CREATE, CTL_EOL);
404 
405 	if (cpunode == NULL)
406 		return;
407 
408 #define SETUPS(name, member)					\
409 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
410 		       CTLFLAG_PERMANENT,			\
411 		       CTLTYPE_STRING, name, NULL,		\
412 		       NULL, 0, member, 0,			\
413 		       CTL_CREATE, CTL_EOL);
414 
415 	SETUPS("name", __UNCONST(ci->cpu_longname))
416 	SETUPS("fpuname", __UNCONST(ci->fpu_name))
417 #undef SETUPS
418 
419 #define SETUPI(name, member)					\
420 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
421 		       CTLFLAG_PERMANENT,			\
422 		       CTLTYPE_INT, name, NULL,			\
423 		       NULL, 0, member, 0,			\
424 		       CTL_CREATE, CTL_EOL);
425 
426 	SETUPI("mid", &ci->mid)
427 	SETUPI("clock_frequency", &ci->hz)
428 	SETUPI("psr_implementation", &ci->cpu_impl)
429 	SETUPI("psr_version", &ci->cpu_vers)
430 	SETUPI("mmu_implementation", &ci->mmu_impl)
431 	SETUPI("mmu_version", &ci->mmu_vers)
432 	SETUPI("mmu_nctx", &ci->mmu_ncontext)
433 #undef SETUPI
434 
435         sysctl_createv(NULL, 0, &cpunode, NULL,
436                        CTLFLAG_PERMANENT,
437                        CTLTYPE_STRUCT, "cacheinfo", NULL,
438                        NULL, 0, &ci->cacheinfo, sizeof(ci->cacheinfo),
439 		       CTL_CREATE, CTL_EOL);
440 
441 }
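/*
 * Usage sketch (illustrative; exact node names depend on the attached
 * device): the nodes created above surface as hw.cpuN.*, e.g.
 *
 *	$ sysctl hw.cpu0.name hw.cpu0.clock_frequency
 */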
442 
443 /*
444  * Attach the CPU.
445  * Discover interesting goop about the virtual address cache
446  * (slightly funny place to do it, but this is where it is to be found).
447  */
448 static void
449 cpu_attach(struct cpu_softc *sc, int node, int mid)
450 {
451 	char buf[100];
452 	struct cpu_info *cpi;
453 	int idx;
454 	static int cpu_attach_count = 0;
455 
456 	/*
457 	 * The first CPU we're attaching must be the boot CPU.
458 	 * (see autoconf.c and cpuunit.c)
459 	 */
460 	idx = cpu_attach_count++;
461 
462 #if !defined(MULTIPROCESSOR)
463 	if (cpu_attach_count > 1) {
464 		printf(": no SMP support in kernel\n");
465 		return;
466 	}
467 #endif
468 
469 	/*
470 	 * Initialise this cpu's cpu_info.
471 	 */
472 	cpi = sc->sc_cpuinfo = cpus[idx];
473 	getcpuinfo(cpi, node);
474 
475 	cpi->ci_cpuid = idx;
476 	cpi->mid = mid;
477 	cpi->node = node;
478 #ifdef DEBUG
479 	cpi->redzone = (void *)((long)cpi->eintstack + REDSIZE);
480 #endif
481 
482 	if (sparc_ncpus > 1) {
483 		printf(": mid %d", mid);
484 		if (mid == 0 && !CPU_ISSUN4D)
485 			printf(" [WARNING: mid should not be 0]");
486 	}
487 
488 #if defined(MULTIPROCESSOR)
489 	if (cpu_attach_count > 1) {
490 		if ((boothowto & RB_MD1) != 0) {
491 			aprint_naive("\n");
492 			aprint_normal(": multiprocessor boot disabled\n");
493 			return;
494 		}
495 		cpu_attach_non_boot(sc, cpi, node);
496 		cpu_init_evcnt(cpi);
497 		cpu_setup_sysctl(sc);
498 		return;
499 	}
500 #endif /* MULTIPROCESSOR */
501 
502 	cpu_init_evcnt(cpi);
503 
504 	/* Stuff to only run on the boot CPU */
505 	cpu_setup();
506 	snprintf(buf, sizeof buf, "%s @ %s MHz, %s FPU",
507 		cpi->cpu_longname, clockfreq(cpi->hz), cpi->fpu_name);
508 	cpu_setmodel("%s (%s)", machine_model, buf);
509 	printf(": %s\n", buf);
510 	cache_print(sc);
511 	cpu_setup_sysctl(sc);
512 
513 	cpi->master = 1;
514 	cpi->eintstack = eintstack;
515 
516 	/*
517 	 * If we haven't been able to determine the Id of the
518 	 * boot CPU, set it now. In this case we can only boot
519 	 * from CPU #0 (see also the CPU attach code in autoconf.c)
520 	 */
521 	if (bootmid == 0)
522 		bootmid = mid;
523 
524 	/*
525 	 * Set speeds now we've attached all CPUs.
526 	 */
527 	if (sparc_ncpus > 1 && sparc_ncpus == cpu_attach_count) {
528 		CPU_INFO_ITERATOR n;
529 		unsigned best_hz = 0;
530 
531 		for (CPU_INFO_FOREACH(n, cpi))
532 			best_hz = MAX(cpi->hz, best_hz);
533 		for (CPU_INFO_FOREACH(n, cpi))
534 			cpu_topology_setspeed(cpi, cpi->hz < best_hz);
535 	}
536 }
537 
538 /*
539  * Finish CPU attach.
540  * Must be run by the CPU which is being attached.
541  */
542 void
543 cpu_setup(void)
544 {
545  	if (cpuinfo.hotfix)
546 		(*cpuinfo.hotfix)(&cpuinfo);
547 
548 	/* Initialize FPU */
549 	fpu_init(&cpuinfo);
550 
551 	/* Enable the cache */
552 	cpuinfo.cache_enable();
553 
554 	cpuinfo.flags |= CPUFLG_HATCHED;
555 }
556 
557 #if defined(MULTIPROCESSOR)
558 /*
559  * Perform most of the tasks needed for a non-boot CPU.
560  */
561 static void
562 cpu_attach_non_boot(struct cpu_softc *sc, struct cpu_info *cpi, int node)
563 {
564 	vaddr_t intstack, va;
565 	int error;
566 
567 	/*
568 	 * Arrange interrupt stack.  This cpu will also abuse the bottom
569 	 * half of the interrupt stack before it gets to run its idle LWP.
570 	 */
571 	intstack = uvm_km_alloc(kernel_map, INT_STACK_SIZE, 0, UVM_KMF_WIRED);
572 	if (intstack == 0)
573 		panic("%s: no uspace/intstack", __func__);
574 	cpi->eintstack = (void*)(intstack + INT_STACK_SIZE);
575 
576 	/* Allocate virtual space for pmap page_copy/page_zero */
577 	va = uvm_km_alloc(kernel_map, 2*PAGE_SIZE, 0, UVM_KMF_VAONLY);
578 	if (va == 0)
579 		panic("%s: no virtual space", __func__);
580 
581 	cpi->vpage[0] = (void *)(va + 0);
582 	cpi->vpage[1] = (void *)(va + PAGE_SIZE);
583 
584 	/*
585 	 * Call the MI attach which creates an idle LWP for us.
586 	 */
587 	error = mi_cpu_attach(cpi);
588 	if (error != 0) {
589 		aprint_normal("\n");
590 		aprint_error("%s: mi_cpu_attach failed with %d\n",
591 		    device_xname(sc->sc_dev), error);
592 		return;
593 	}
594 
595 	/*
596 	 * Note: `eintstack' is set in cpu_attach_non_boot() above.
597 	 * The %wim register will be initialized in cpu_hatch().
598 	 */
599 	cpi->ci_curlwp = cpi->ci_data.cpu_idlelwp;
600 	cpi->curpcb = lwp_getpcb(cpi->ci_curlwp);
601 	cpi->curpcb->pcb_wim = 1;
602 
603 	/* for now use the fixed virtual addresses setup in autoconf.c */
604 	cpi->intreg_4m = (struct icr_pi *)
605 		(PI_INTR_VA + (_MAXNBPG * CPU_MID2CPUNO(cpi->mid)));
606 
607 	/* Now start this CPU */
608 	cpu_spinup(cpi);
609 	printf(": %s @ %s MHz, %s FPU\n", cpi->cpu_longname,
610 		clockfreq(cpi->hz), cpi->fpu_name);
611 
612 	cache_print(sc);
613 
614 	/*
615 	 * Now we're on the last CPU to be attaching.
616 	 */
617 	if (sparc_ncpus > 1 && cpi->ci_cpuid == sparc_ncpus - 1) {
618 		CPU_INFO_ITERATOR n;
619 		/*
620 		 * Install MP cache flush functions, unless the
621 		 * single-processor versions are no-ops.
622 		 */
623 		for (CPU_INFO_FOREACH(n, cpi)) {
624 #define SET_CACHE_FUNC(x) \
625 	if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x)
626 			SET_CACHE_FUNC(vcache_flush_page);
627 			SET_CACHE_FUNC(vcache_flush_segment);
628 			SET_CACHE_FUNC(vcache_flush_region);
629 			SET_CACHE_FUNC(vcache_flush_context);
630 		}
631 	}
632 #undef SET_CACHE_FUNC
633 }
634 
635 /*
636  * Start secondary processors in motion.
637  */
638 void
639 cpu_boot_secondary_processors(void)
640 {
641 	CPU_INFO_ITERATOR n;
642 	struct cpu_info *cpi;
643 
644 	printf("cpu0: booting secondary processors:");
645 	for (CPU_INFO_FOREACH(n, cpi)) {
646 		if (cpuinfo.mid == cpi->mid ||
647 		    (cpi->flags & CPUFLG_HATCHED) == 0)
648 			continue;
649 
650 		printf(" cpu%d", cpi->ci_cpuid);
651 		cpu_ready_mask |= (1 << n);
652 	}
653 
654 	/* Mark the boot CPU as ready */
655 	cpu_ready_mask |= (1 << 0);
656 
657 	/* Tell the other CPUs to start up.  */
658 	go_smp_cpus = 1;
659 
660 	printf("\n");
661 }
662 
663 /*
664  * Early initialisation, before main().
665  */
666 void
667 cpu_init_system(void)
668 {
669 
670 	mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_SCHED);
671 }
672 
673 /*
674  * Allocate per-CPU data, then start up this CPU using PROM.
675  */
676 void
677 cpu_spinup(struct cpu_info *cpi)
678 {
679 	struct openprom_addr oa;
680 	void *pc;
681 	int n;
682 
683 	pc = (void *)cpu_hatch;
684 
685 	/* Setup CPU-specific MMU tables */
686 	pmap_alloc_cpu(cpi);
687 
688 	cpi->flags &= ~CPUFLG_HATCHED;
689 
690 	/*
691 	 * The physical address of the context table is passed to
692 	 * the PROM in a "physical address descriptor".
693 	 */
694 	oa.oa_space = 0;
695 	oa.oa_base = (uint32_t)cpi->ctx_tbl_pa;
696 	oa.oa_size = cpi->mmu_ncontext * sizeof(cpi->ctx_tbl[0]); /*???*/
697 
698 	/*
699 	 * Flush entire cache here, since the CPU may start with
700 	 * caches off, hence no cache-coherency may be assumed.
701 	 */
702 	cpuinfo.cache_flush_all();
703 	prom_cpustart(cpi->node, &oa, 0, pc);
704 
705 	/*
706 	 * Wait for this CPU to spin up.
707 	 */
708 	for (n = 10000; n != 0; n--) {
709 		cache_flush((void *) __UNVOLATILE(&cpi->flags),
710 			    sizeof(cpi->flags));
711 		if (cpi->flags & CPUFLG_HATCHED)
712 			return;
713 		delay(100);
714 	}
715 	printf("CPU did not spin up\n");
716 }
717 
718 /*
719  * Call a function on some CPUs.  `cpuset' can be set to CPUSET_ALL
720  * to call every CPU, or `1 << cpi->ci_cpuid' for each CPU to call.
721  */
722 void
723 xcall(xcall_func_t func, xcall_trap_t trap, int arg0, int arg1, int arg2,
724       u_int cpuset)
725 {
726 	struct cpu_info *cpi;
727 	int n, i, done, callself, mybit;
728 	volatile struct xpmsg_func *p;
729 	u_int pil;
730 	int fasttrap;
731 	int is_noop = func == (xcall_func_t)sparc_noop;
732 	static char errbuf[160];
733 	char *bufp = errbuf;
734 	size_t bufsz = sizeof errbuf, wrsz;
735 
736 	if (is_noop) return;
737 
738 	mybit = (1 << cpuinfo.ci_cpuid);
739 	callself = func && (cpuset & mybit) != 0;
740 	cpuset &= ~mybit;
741 
742 	/* Mask any CPUs that are not ready */
743 	cpuset &= cpu_ready_mask;
744 
745 #if 0
746 	mutex_spin_enter(&xpmsg_mutex);
747 #else
748 	/*
749 	 * There's a deadlock potential between multiple CPUs trying
750 	 * to xcall() at the same time, and the thread that loses the
751 	 * race to get xpmsg_lock is at an IPL above the incoming IPI
752 	 * IPL level, so it sits around waiting to take the lock while
753 	 * the other CPU is waiting for this CPU to handle the IPI and
754 	 * mark it as completed.
755 	 *
756 	 * If we fail to get the mutex, and we're at high enough IPL,
757 	 * call xcallintr() if there is a valid msg.tag.
758 	 */
759 	pil = (getpsr() & PSR_PIL) >> 8;
760 
761 	if (cold || pil <= IPL_SCHED)
762 		mutex_spin_enter(&xpmsg_mutex);
763 	else {
764 		/*
765 		 * Warn about xcall at high IPL.
766 		 *
767 		 * XXX This is probably bogus (logging at high IPL),
768 		 * XXX so we don't do it by default.
769 		 */
770 		if (debug_xcall && (void *)func != sparc_noop) {
771 			u_int pc;
772 
773 			__asm("mov %%i7, %0" : "=r" (pc) : );
774 			printf_nolog("%d: xcall %p at lvl %u from 0x%x\n",
775 			    cpu_number(), func, pil, pc);
776 		}
777 
778 		while (mutex_tryenter(&xpmsg_mutex) == 0) {
779 			cpuinfo.ci_xpmsg_mutex_fail.ev_count++;
780 			if (cpuinfo.msg.tag) {
781 				cpuinfo.ci_xpmsg_mutex_fail_call.ev_count++;
782 				xcallintr(xcallintr);
783 			}
784 		}
785 	}
786 #endif
787 
788 	/*
789 	 * Firstly, call each CPU.  We do this so that they might have
790 	 * finished by the time we start looking.
791 	 */
792 	fasttrap = trap != NULL ? 1 : 0;
793 	for (CPU_INFO_FOREACH(n, cpi)) {
794 
795 		/* Note: n == cpi->ci_cpuid */
796 		if ((cpuset & (1 << n)) == 0)
797 			continue;
798 
799 		/*
800 		 * Write msg.tag last - if another CPU is polling above it may
801 		 * end up seeing an incomplete message. Not likely but still.
802 		 */
803 		cpi->msg.complete = 0;
804 		p = &cpi->msg.u.xpmsg_func;
805 		p->func = func;
806 		p->trap = trap;
807 		p->arg0 = arg0;
808 		p->arg1 = arg1;
809 		p->arg2 = arg2;
810 		__insn_barrier();
811 		cpi->msg.tag = XPMSG_FUNC;
812 		__insn_barrier();
813 		/* Fast cross calls use interrupt level 14 */
814 		raise_ipi(cpi,13+fasttrap);/*xcall_cookie->pil*/
815 	}
816 
817 	/*
818 	 * Second, call ourselves.
819 	 */
820 	if (callself)
821 		(*func)(arg0, arg1, arg2);
822 
823 	/*
824 	 * Lastly, start looping, waiting for all CPUs to register that they
825 	 * have completed (bailing if it takes "too long", being loud about
826 	 * this in the process).
827 	 */
828 	done = 0;
829 	i = 1000000;	/* time-out, not too long, but still an _AGE_ */
830 	while (!done) {
831 		if (--i < 0) {
832 			wrsz = snprintf(bufp, bufsz,
833 			    "xcall(cpu%d,%p) from %p: couldn't ping cpus:",
834 			    cpu_number(), fasttrap ? trap : func,
835 			    __builtin_return_address(0));
836 			if (wrsz > bufsz)
837 				break;
838 			bufsz -= wrsz;
839 			bufp += wrsz;
840 		}
841 
842 		done = 1;
843 		for (CPU_INFO_FOREACH(n, cpi)) {
844 			if ((cpuset & (1 << n)) == 0)
845 				continue;
846 
847 			if (cpi->msg.complete == 0) {
848 				if (i < 0) {
849 					wrsz = snprintf(bufp, bufsz,
850 							" cpu%d", cpi->ci_cpuid);
851 					if (wrsz > bufsz)
852 						break;
853 					bufsz -= wrsz;
854 					bufp += wrsz;
855 				} else {
856 					done = 0;
857 					break;
858 				}
859 			}
860 		}
861 	}
862 
863 	if (i >= 0 || debug_xcall == 0) {
864 		if (i < 0)
865 			aprint_error("%s\n", errbuf);
866 		mutex_spin_exit(&xpmsg_mutex);
867 		return;
868 	}
869 
870 	/*
871 	 * Let's make this a hard panic for now, and figure out why it
872 	 * happens.
873 	 *
874 	 * We call mp_pause_cpus() so we can capture their state *now*
875 	 * as opposed to after we've written all the below to the console.
876 	 */
877 #ifdef DDB
878 	mp_pause_cpus_ddb();
879 #else
880 	mp_pause_cpus();
881 #endif
882 	printf_nolog("%s\n", errbuf);
883 	mutex_spin_exit(&xpmsg_mutex);
884 
885 	panic("failed to ping cpus");
886 }
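/*
 * Hedged usage sketch: a direct cross call hands a plain function and
 * up to three arguments to the selected CPUs.  `example_func' is
 * hypothetical; real callers normally go through the XCALL*() wrappers.
 */
#if 0
static void
example_func(int a, int b, int c)
{
	/* runs once on every CPU named in the cpuset */
}

static void
xcall_example(void)
{

	/* call all CPUs except ourselves, no fast trap */
	xcall((xcall_func_t)example_func, NULL, 1, 2, 3,
	    CPUSET_ALL & ~(1 << cpuinfo.ci_cpuid));
}
#endif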
887 
888 /*
889  * MD support for MI xcall(9) interface.
890  */
891 void
892 xc_send_ipi(struct cpu_info *target)
893 {
894 	u_int cpuset;
895 
896 	KASSERT(kpreempt_disabled());
897 	KASSERT(curcpu() != target);
898 
899 	if (target)
900 		cpuset = 1 << target->ci_cpuid;
901 	else
902 		cpuset = CPUSET_ALL & ~(1 << cpuinfo.ci_cpuid);
903 	XCALL0(xc_ipi_handler, cpuset);
904 }
905 
906 void
907 cpu_ipi(struct cpu_info *target)
908 {
909 	u_int cpuset;
910 
911 	KASSERT(kpreempt_disabled());
912 	KASSERT(curcpu() != target);
913 
914 	if (target)
915 		cpuset = 1 << target->ci_cpuid;
916 	else
917 		cpuset = CPUSET_ALL & ~(1 << cpuinfo.ci_cpuid);
918 	XCALL0(ipi_cpu_handler, cpuset);
919 }
920 
921 /*
922  * Tell all CPUs other than the current one to enter the PROM idle loop.
923  */
924 void
925 mp_pause_cpus(void)
926 {
927 	CPU_INFO_ITERATOR n;
928 	struct cpu_info *cpi;
929 
930 	for (CPU_INFO_FOREACH(n, cpi)) {
931 		if (cpuinfo.mid == cpi->mid ||
932 		    (cpi->flags & CPUFLG_HATCHED) == 0)
933 			continue;
934 
935 		/*
936 		 * This PROM utility will put the OPENPROM_MBX_ABORT
937 		 * message (0xfc) in the target CPU's mailbox and then
938 		 * send it a level 15 soft interrupt.
939 		 */
940 		if (prom_cpuidle(cpi->node) != 0)
941 			printf("cpu%d could not be paused\n", cpi->ci_cpuid);
942 	}
943 }
944 
945 /*
946  * Resume all idling CPUs.
947  */
948 void
949 mp_resume_cpus(void)
950 {
951 	CPU_INFO_ITERATOR n;
952 	struct cpu_info *cpi;
953 
954 	for (CPU_INFO_FOREACH(n, cpi)) {
955 		if (cpuinfo.mid == cpi->mid ||
956 		    (cpi->flags & CPUFLG_HATCHED) == 0)
957 			continue;
958 
959 		/*
960 		 * This PROM utility makes the target CPU return
961 		 * from its prom_cpuidle(0) call (see intr.c:nmi_soft()).
962 		 */
963 		if (prom_cpuresume(cpi->node) != 0)
964 			printf("cpu%d could not be resumed\n", cpi->ci_cpuid);
965 	}
966 }
967 
968 /*
969  * Tell all CPUs except the current one to hurry back into the prom
970  */
971 void
972 mp_halt_cpus(void)
973 {
974 	CPU_INFO_ITERATOR n;
975 	struct cpu_info *cpi;
976 
977 	for (CPU_INFO_FOREACH(n, cpi)) {
978 		int r;
979 
980 		if (cpuinfo.mid == cpi->mid)
981 			continue;
982 
983 		/*
984 		 * This PROM utility will put the OPENPROM_MBX_STOP
985 		 * message (0xfb) in the target CPU's mailbox and then
986 		 * send it a level 15 soft interrupt.
987 		 */
988 		r = prom_cpustop(cpi->node);
989 		printf("cpu%d %shalted\n", cpi->ci_cpuid,
990 			r == 0 ? "" : "(boot CPU?) can not be ");
991 	}
992 }
993 
994 #if defined(DDB)
995 void
996 mp_pause_cpus_ddb(void)
997 {
998 	CPU_INFO_ITERATOR n;
999 	struct cpu_info *cpi;
1000 
1001 	for (CPU_INFO_FOREACH(n, cpi)) {
1002 		if (cpi == NULL || cpi->mid == cpuinfo.mid ||
1003 		    (cpi->flags & CPUFLG_HATCHED) == 0)
1004 			continue;
1005 
1006 		cpi->msg_lev15.tag = XPMSG15_PAUSECPU;
1007 		raise_ipi(cpi,15);	/* high priority intr */
1008 	}
1009 }
1010 
1011 void
1012 mp_resume_cpus_ddb(void)
1013 {
1014 	CPU_INFO_ITERATOR n;
1015 	struct cpu_info *cpi;
1016 
1017 	for (CPU_INFO_FOREACH(n, cpi)) {
1018 		if (cpi == NULL || cpuinfo.mid == cpi->mid ||
1019 		    (cpi->flags & CPUFLG_PAUSED) == 0)
1020 			continue;
1021 
1022 		/* tell it to continue */
1023 		cpi->flags &= ~CPUFLG_PAUSED;
1024 	}
1025 }
1026 #endif /* DDB */
1027 #endif /* MULTIPROCESSOR */
1028 
1029 /*
1030  * fpu_init() must be run on associated CPU.
1031  */
1032 void
1033 fpu_init(struct cpu_info *sc)
1034 {
1035 	struct fpstate fpstate;
1036 	int fpuvers;
1037 
1038 	/*
1039 	 * Get the FSR and clear any exceptions.  If we do not unload
1040 	 * the queue here and it is left over from a previous crash, we
1041 	 * will panic in the first loadfpstate(), due to a sequence
1042 	 * error, so we need to dump the whole state anyway.
1043 	 *
1044 	 * If there is no FPU, trap.c will advance over all the stores,
1045 	 * so we initialize fs_fsr here.
1046 	 */
1047 
1048 	/* 7 is reserved for "none" */
1049 	fpstate.fs_fsr = 7 << FSR_VER_SHIFT;
1050 	savefpstate(&fpstate);
1051 	sc->fpuvers = fpuvers =
1052 		(fpstate.fs_fsr >> FSR_VER_SHIFT) & (FSR_VER >> FSR_VER_SHIFT);
1053 
1054 	if (fpuvers == 7) {
1055 		sc->fpu_name = "no";
1056 		return;
1057 	}
1058 
1059 	sc->fpupresent = 1;
1060 	sc->fpu_name = fsrtoname(sc->cpu_impl, sc->cpu_vers, fpuvers);
1061 	if (sc->fpu_name == NULL) {
1062 		snprintf(sc->fpu_namebuf, sizeof(sc->fpu_namebuf),
1063 		    "version 0x%x", fpuvers);
1064 		sc->fpu_name = sc->fpu_namebuf;
1065 	}
1066 }
1067 
1068 static void
1069 cache_print(struct cpu_softc *sc)
1070 {
1071 	struct cacheinfo *ci = &sc->sc_cpuinfo->cacheinfo;
1072 
1073 	cache_printf_backend(ci, device_xname(sc->sc_dev));
1074 }
1075 
1076 /*------------*/
1077 
1078 
1079 void cpumatch_unknown(struct cpu_info *, struct module_info *, int);
1080 void cpumatch_sun4(struct cpu_info *, struct module_info *, int);
1081 void cpumatch_sun4c(struct cpu_info *, struct module_info *, int);
1082 void cpumatch_ms1(struct cpu_info *, struct module_info *, int);
1083 void cpumatch_viking(struct cpu_info *, struct module_info *, int);
1084 void cpumatch_hypersparc(struct cpu_info *, struct module_info *, int);
1085 void cpumatch_turbosparc(struct cpu_info *, struct module_info *, int);
1086 
1087 void getcacheinfo_sun4(struct cpu_info *, int node);
1088 void getcacheinfo_sun4c(struct cpu_info *, int node);
1089 void getcacheinfo_obp(struct cpu_info *, int node);
1090 void getcacheinfo_sun4d(struct cpu_info *, int node);
1091 
1092 void sun4_hotfix(struct cpu_info *);
1093 void viking_hotfix(struct cpu_info *);
1094 void turbosparc_hotfix(struct cpu_info *);
1095 void swift_hotfix(struct cpu_info *);
1096 
1097 void ms1_mmu_enable(void);
1098 void viking_mmu_enable(void);
1099 void swift_mmu_enable(void);
1100 void hypersparc_mmu_enable(void);
1101 
1102 void srmmu_get_syncflt(void);
1103 void ms1_get_syncflt(void);
1104 void viking_get_syncflt(void);
1105 void swift_get_syncflt(void);
1106 void turbosparc_get_syncflt(void);
1107 void hypersparc_get_syncflt(void);
1108 void cypress_get_syncflt(void);
1109 
1110 int srmmu_get_asyncflt(u_int *, u_int *);
1111 int hypersparc_get_asyncflt(u_int *, u_int *);
1112 int cypress_get_asyncflt(u_int *, u_int *);
1113 int no_asyncflt_regs(u_int *, u_int *);
1114 
1115 int hypersparc_getmid(void);
1116 /* cypress and hypersparc can share this function, see ctlreg.h */
1117 #define cypress_getmid	hypersparc_getmid
1118 int viking_getmid(void);
1119 
1120 #if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D)
1121 int viking_module_error(void);
1122 #endif
1123 
1124 struct module_info module_unknown = {
1125 	CPUTYP_UNKNOWN,
1126 	VAC_UNKNOWN,
1127 	cpumatch_unknown
1128 };
1129 
1130 
1131 void
1132 cpumatch_unknown(struct cpu_info *sc, struct module_info *mp, int node)
1133 {
1134 
1135 	panic("Unknown CPU type: "
1136 	      "cpu: impl %d, vers %d; mmu: impl %d, vers %d",
1137 		sc->cpu_impl, sc->cpu_vers,
1138 		sc->mmu_impl, sc->mmu_vers);
1139 }
1140 
1141 #if defined(SUN4)
1142 struct module_info module_sun4 = {
1143 	CPUTYP_UNKNOWN,
1144 	VAC_WRITETHROUGH,
1145 	cpumatch_sun4,
1146 	getcacheinfo_sun4,
1147 	sun4_hotfix,
1148 	0,
1149 	sun4_cache_enable,
1150 	0,
1151 	0,			/* ncontext set in `match' function */
1152 	0,			/* get_syncflt(); unused in sun4c */
1153 	0,			/* get_asyncflt(); unused in sun4c */
1154 	sun4_cache_flush,
1155 	sun4_vcache_flush_page, NULL,
1156 	sun4_vcache_flush_segment, NULL,
1157 	sun4_vcache_flush_region, NULL,
1158 	sun4_vcache_flush_context, NULL,
1159 	NULL, NULL,
1160 	noop_pcache_flush_page,
1161 	noop_pure_vcache_flush,
1162 	noop_cache_flush_all,
1163 	0,
1164 	pmap_zero_page4_4c,
1165 	pmap_copy_page4_4c
1166 };
1167 
1168 void
1169 getcacheinfo_sun4(struct cpu_info *sc, int node)
1170 {
1171 	struct cacheinfo *ci = &sc->cacheinfo;
1172 
1173 	switch (sc->cpu_type) {
1174 	case CPUTYP_4_100:
1175 		ci->c_vactype = VAC_NONE;
1176 		ci->c_totalsize = 0;
1177 		ci->c_hwflush = 0;
1178 		ci->c_linesize = 0;
1179 		ci->c_l2linesize = 0;
1180 		ci->c_split = 0;
1181 		ci->c_nlines = 0;
1182 
1183 		/* Override cache flush functions */
1184 		sc->cache_flush = noop_cache_flush;
1185 		sc->sp_vcache_flush_page = noop_vcache_flush_page;
1186 		sc->sp_vcache_flush_segment = noop_vcache_flush_segment;
1187 		sc->sp_vcache_flush_region = noop_vcache_flush_region;
1188 		sc->sp_vcache_flush_context = noop_vcache_flush_context;
1189 		break;
1190 	case CPUTYP_4_200:
1191 		ci->c_vactype = VAC_WRITEBACK;
1192 		ci->c_totalsize = 128*1024;
1193 		ci->c_hwflush = 0;
1194 		ci->c_linesize = 16;
1195 		ci->c_l2linesize = 4;
1196 		ci->c_split = 0;
1197 		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1198 		break;
1199 	case CPUTYP_4_300:
1200 		ci->c_vactype = VAC_WRITEBACK;
1201 		ci->c_totalsize = 128*1024;
1202 		ci->c_hwflush = 0;
1203 		ci->c_linesize = 16;
1204 		ci->c_l2linesize = 4;
1205 		ci->c_split = 0;
1206 		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1207 		sc->cacheinfo.c_flags |= CACHE_TRAPPAGEBUG;
1208 		break;
1209 	case CPUTYP_4_400:
1210 		ci->c_vactype = VAC_WRITEBACK;
1211 		ci->c_totalsize = 128 * 1024;
1212 		ci->c_hwflush = 0;
1213 		ci->c_linesize = 32;
1214 		ci->c_l2linesize = 5;
1215 		ci->c_split = 0;
1216 		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1217 		break;
1218 	}
1219 }
1220 
1221 void
1222 cpumatch_sun4(struct cpu_info *sc, struct module_info *mp, int node)
1223 {
1224 	struct idprom *idp = prom_getidprom();
1225 
1226 	switch (idp->idp_machtype) {
1227 	case ID_SUN4_100:
1228 		sc->cpu_type = CPUTYP_4_100;
1229 		sc->classlvl = 100;
1230 		sc->mmu_ncontext = 8;
1231 		sc->mmu_nsegment = 256;
1232 /*XXX*/		sc->hz = 14280000;
1233 		break;
1234 	case ID_SUN4_200:
1235 		sc->cpu_type = CPUTYP_4_200;
1236 		sc->classlvl = 200;
1237 		sc->mmu_nsegment = 512;
1238 		sc->mmu_ncontext = 16;
1239 /*XXX*/		sc->hz = 16670000;
1240 		break;
1241 	case ID_SUN4_300:
1242 		sc->cpu_type = CPUTYP_4_300;
1243 		sc->classlvl = 300;
1244 		sc->mmu_nsegment = 256;
1245 		sc->mmu_ncontext = 16;
1246 /*XXX*/		sc->hz = 25000000;
1247 		break;
1248 	case ID_SUN4_400:
1249 		sc->cpu_type = CPUTYP_4_400;
1250 		sc->classlvl = 400;
1251 		sc->mmu_nsegment = 1024;
1252 		sc->mmu_ncontext = 64;
1253 		sc->mmu_nregion = 256;
1254 /*XXX*/		sc->hz = 33000000;
1255 		sc->sun4_mmu3l = 1;
1256 		break;
1257 	}
1258 
1259 }
1260 #endif /* SUN4 */
1261 
1262 #if defined(SUN4C)
1263 struct module_info module_sun4c = {
1264 	CPUTYP_UNKNOWN,
1265 	VAC_WRITETHROUGH,
1266 	cpumatch_sun4c,
1267 	getcacheinfo_sun4c,
1268 	sun4_hotfix,
1269 	0,
1270 	sun4_cache_enable,
1271 	0,
1272 	0,			/* ncontext set in `match' function */
1273 	0,			/* get_syncflt(); unused in sun4c */
1274 	0,			/* get_asyncflt(); unused in sun4c */
1275 	sun4_cache_flush,
1276 	sun4_vcache_flush_page, NULL,
1277 	sun4_vcache_flush_segment, NULL,
1278 	sun4_vcache_flush_region, NULL,
1279 	sun4_vcache_flush_context, NULL,
1280 	NULL, NULL,
1281 	noop_pcache_flush_page,
1282 	noop_pure_vcache_flush,
1283 	noop_cache_flush_all,
1284 	0,
1285 	pmap_zero_page4_4c,
1286 	pmap_copy_page4_4c
1287 };
1288 
1289 void
1290 cpumatch_sun4c(struct cpu_info *sc, struct module_info *mp, int node)
1291 {
1292 	int	rnode;
1293 
1294 	rnode = findroot();
1295 	sc->mmu_npmeg = sc->mmu_nsegment =
1296 		prom_getpropint(rnode, "mmu-npmg", 128);
1297 	sc->mmu_ncontext = prom_getpropint(rnode, "mmu-nctx", 8);
1298 
1299 	/* Get clock frequency */
1300 	sc->hz = prom_getpropint(rnode, "clock-frequency", 0);
1301 }
1302 
1303 void
1304 getcacheinfo_sun4c(struct cpu_info *sc, int node)
1305 {
1306 	struct cacheinfo *ci = &sc->cacheinfo;
1307 	int i, l;
1308 
1309 	if (node == 0)
1310 		/* Bootstrapping */
1311 		return;
1312 
1313 	/* Sun4c's have only virtually-addressed caches */
1314 	ci->c_physical = 0;
1315 	ci->c_totalsize = prom_getpropint(node, "vac-size", 65536);
1316 	/*
1317 	 * Note: vac-hwflush is spelled with an underscore
1318 	 * on the 4/75s.
1319 	 */
1320 	ci->c_hwflush =
1321 		prom_getpropint(node, "vac_hwflush", 0) |
1322 		prom_getpropint(node, "vac-hwflush", 0);
1323 
1324 	ci->c_linesize = l = prom_getpropint(node, "vac-linesize", 16);
1325 	for (i = 0; (1 << i) < l; i++)
1326 		/* void */;
1327 	if ((1 << i) != l)
1328 		panic("bad cache line size %d", l);
1329 	ci->c_l2linesize = i;
1330 	ci->c_associativity = 1;
1331 	ci->c_nlines = ci->c_totalsize >> i;
1332 
1333 	ci->c_vactype = VAC_WRITETHROUGH;
1334 
1335 	/*
1336 	 * Machines with "buserr-type" 1 have a bug in the cache
1337 	 * chip that affects traps.  (I wish I knew more about this
1338 	 * mysterious buserr-type variable....)
1339 	 */
1340 	if (prom_getpropint(node, "buserr-type", 0) == 1)
1341 		sc->cacheinfo.c_flags |= CACHE_TRAPPAGEBUG;
1342 }
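/*
 * Sketch of the line-size arithmetic used above (illustrative only):
 * the loop computes log2 of the cache line size, and the panic guards
 * against sizes that are not a power of two.
 */
#if 0
static void
linesize_example(void)
{
	int i, l = 16;		/* a vac-linesize of 16 bytes */

	for (i = 0; (1 << i) < l; i++)
		/* void */;
	/* i == 4: c_l2linesize = 4, c_nlines = c_totalsize >> 4 */
}
#endif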
1343 #endif /* SUN4C */
1344 
1345 void
1346 sun4_hotfix(struct cpu_info *sc)
1347 {
1348 
1349 	if ((sc->cacheinfo.c_flags & CACHE_TRAPPAGEBUG) != 0)
1350 		kvm_uncache((char *)trapbase, 1);
1351 
1352 	/* Use the hardware-assisted page flush routine, if present */
1353 	if (sc->cacheinfo.c_hwflush)
1354 		sc->vcache_flush_page = sun4_vcache_flush_page_hw;
1355 }
1356 
1357 #if defined(SUN4M)
1358 void
1359 getcacheinfo_obp(struct cpu_info *sc, int node)
1360 {
1361 	struct cacheinfo *ci = &sc->cacheinfo;
1362 	int i, l;
1363 
1364 #if defined(MULTIPROCESSOR)
1365 	/*
1366 	 * We really really want the cache info early for MP systems,
1367 	 * so figure out the boot node, if we can.
1368 	 *
1369 	 * XXX this loop stolen from mainbus_attach()
1370 	 */
1371 	if (node == 0 && CPU_ISSUN4M && bootmid != 0) {
1372 		const char *cp;
1373 		char namebuf[32];
1374 		int mid, node2;
1375 
1376 		for (node2 = firstchild(findroot());
1377 		     node2;
1378 		     node2 = nextsibling(node2)) {
1379 			cp = prom_getpropstringA(node2, "device_type",
1380 					    namebuf, sizeof namebuf);
1381 			if (strcmp(cp, "cpu") != 0)
1382 				continue;
1383 
1384 			mid = prom_getpropint(node2, "mid", -1);
1385 			if (mid == bootmid) {
1386 				node = node2;
1387 				break;
1388 			}
1389 		}
1390 	}
1391 #endif
1392 
1393 	if (node == 0)
1394 		/* Bootstrapping */
1395 		return;
1396 
1397 	/*
1398 	 * Determine the Sun4m cache organization.
1399 	 */
1400 	ci->c_physical = node_has_property(node, "cache-physical?");
1401 
1402 	if (prom_getpropint(node, "ncaches", 1) == 2)
1403 		ci->c_split = 1;
1404 	else
1405 		ci->c_split = 0;
1406 
1407 	/* hwflush is used only by sun4/4c code */
1408 	ci->c_hwflush = 0;
1409 
1410 	if (node_has_property(node, "icache-nlines") &&
1411 	    node_has_property(node, "dcache-nlines") &&
1412 	    ci->c_split) {
1413 		/* Harvard architecture: get I and D cache sizes */
1414 		ci->ic_nlines = prom_getpropint(node, "icache-nlines", 0);
1415 		ci->ic_linesize = l =
1416 			prom_getpropint(node, "icache-line-size", 0);
1417 		for (i = 0; (1 << i) < l && l; i++)
1418 			/* void */;
1419 		if ((1 << i) != l && l)
1420 			panic("bad icache line size %d", l);
1421 		ci->ic_l2linesize = i;
1422 		ci->ic_associativity =
1423 			prom_getpropint(node, "icache-associativity", 1);
1424 		ci->ic_totalsize = l * ci->ic_nlines * ci->ic_associativity;
1425 
1426 		ci->dc_nlines = prom_getpropint(node, "dcache-nlines", 0);
1427 		ci->dc_linesize = l =
1428 			prom_getpropint(node, "dcache-line-size",0);
1429 		for (i = 0; (1 << i) < l && l; i++)
1430 			/* void */;
1431 		if ((1 << i) != l && l)
1432 			panic("bad dcache line size %d", l);
1433 		ci->dc_l2linesize = i;
1434 		ci->dc_associativity =
1435 			prom_getpropint(node, "dcache-associativity", 1);
1436 		ci->dc_totalsize = l * ci->dc_nlines * ci->dc_associativity;
1437 
1438 		ci->c_l2linesize = uimin(ci->ic_l2linesize, ci->dc_l2linesize);
1439 		ci->c_linesize = uimin(ci->ic_linesize, ci->dc_linesize);
1440 		ci->c_totalsize = uimax(ci->ic_totalsize, ci->dc_totalsize);
1441 		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1442 	} else {
1443 		/* unified I/D cache */
1444 		ci->c_nlines = prom_getpropint(node, "cache-nlines", 128);
1445 		ci->c_linesize = l =
1446 			prom_getpropint(node, "cache-line-size", 0);
1447 		for (i = 0; (1 << i) < l && l; i++)
1448 			/* void */;
1449 		if ((1 << i) != l && l)
1450 			panic("bad cache line size %d", l);
1451 		ci->c_l2linesize = i;
1452 		ci->c_associativity =
1453 			prom_getpropint(node, "cache-associativity", 1);
1454 		ci->dc_associativity = ci->ic_associativity =
1455 			ci->c_associativity;
1456 		ci->c_totalsize = l * ci->c_nlines * ci->c_associativity;
1457 	}
1458 
1459 	if (node_has_property(node, "ecache-nlines")) {
1460 		/* we have a L2 "e"xternal cache */
1461 		ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768);
1462 		ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0);
1463 		for (i = 0; (1 << i) < l && l; i++)
1464 			/* void */;
1465 		if ((1 << i) != l && l)
1466 			panic("bad ecache line size %d", l);
1467 		ci->ec_l2linesize = i;
1468 		ci->ec_associativity =
1469 			prom_getpropint(node, "ecache-associativity", 1);
1470 		ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity;
1471 	}
1472 	if (ci->c_totalsize == 0)
1473 		printf("warning: couldn't identify cache\n");
1474 }
1475 
1476 /*
1477  * We use the max. number of contexts on the micro and
1478  * hyper SPARCs. The SuperSPARC would let us use up to 65536
1479  * contexts (by powers of 2), but we keep it at 4096 since
1480  * the table must be aligned to #context*4. With 4K contexts,
1481  * we waste at most 16K of memory. Note that the context
1482  * table is *always* page-aligned, so there can always be
1483  * 1024 contexts without sacrificing memory space (given
1484  * that the chip supports 1024 contexts).
1485  *
1486  * Currently known limits: MS1=64, MS2=256, HS=4096, SS=65536
1487  * 	some old SS's=4096
1488  */
1489 
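/*
 * Worked numbers for the comment above (illustrative): 4096 contexts
 * of 4 bytes each give a 4096 * 4 = 16KB table, which must be aligned
 * to its own size, so at most 16KB is lost to alignment.  A
 * page-aligned table always holds PAGE_SIZE / 4 entries (1024 contexts
 * on 4KB pages) without extra alignment cost.
 */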
1490 /* TI Microsparc I */
1491 struct module_info module_ms1 = {
1492 	CPUTYP_MS1,
1493 	VAC_NONE,
1494 	cpumatch_ms1,
1495 	getcacheinfo_obp,
1496 	0,
1497 	ms1_mmu_enable,
1498 	ms1_cache_enable,
1499 	0,
1500 	64,
1501 	ms1_get_syncflt,
1502 	no_asyncflt_regs,
1503 	ms1_cache_flush,
1504 	noop_vcache_flush_page, NULL,
1505 	noop_vcache_flush_segment, NULL,
1506 	noop_vcache_flush_region, NULL,
1507 	noop_vcache_flush_context, NULL,
1508 	noop_vcache_flush_range, NULL,
1509 	noop_pcache_flush_page,
1510 	noop_pure_vcache_flush,
1511 	ms1_cache_flush_all,
1512 	memerr4m,
1513 	pmap_zero_page4m,
1514 	pmap_copy_page4m
1515 };
1516 
1517 void
1518 cpumatch_ms1(struct cpu_info *sc, struct module_info *mp, int node)
1519 {
1520 }
1521 
1522 void
1523 ms1_mmu_enable(void)
1524 {
1525 }
1526 
1527 /* TI Microsparc II */
1528 struct module_info module_ms2 = {		/* UNTESTED */
1529 	CPUTYP_MS2,
1530 	VAC_WRITETHROUGH,
1531 	0,
1532 	getcacheinfo_obp,
1533 	0,
1534 	0,
1535 	swift_cache_enable,
1536 	0,
1537 	256,
1538 	srmmu_get_syncflt,
1539 	srmmu_get_asyncflt,
1540 	srmmu_cache_flush,
1541 	srmmu_vcache_flush_page, NULL,
1542 	srmmu_vcache_flush_segment, NULL,
1543 	srmmu_vcache_flush_region, NULL,
1544 	srmmu_vcache_flush_context, NULL,
1545 	srmmu_vcache_flush_range, NULL,
1546 	noop_pcache_flush_page,
1547 	noop_pure_vcache_flush,
1548 	srmmu_cache_flush_all,
1549 	memerr4m,
1550 	pmap_zero_page4m,
1551 	pmap_copy_page4m
1552 };
1553 
1554 
1555 struct module_info module_swift = {
1556 	CPUTYP_MS2,
1557 	VAC_WRITETHROUGH,
1558 	0,
1559 	getcacheinfo_obp,
1560 	swift_hotfix,
1561 	0,
1562 	swift_cache_enable,
1563 	0,
1564 	256,
1565 	swift_get_syncflt,
1566 	no_asyncflt_regs,
1567 	srmmu_cache_flush,
1568 	srmmu_vcache_flush_page, NULL,
1569 	srmmu_vcache_flush_segment, NULL,
1570 	srmmu_vcache_flush_region, NULL,
1571 	srmmu_vcache_flush_context, NULL,
1572 	srmmu_vcache_flush_range, NULL,
1573 	noop_pcache_flush_page,
1574 	noop_pure_vcache_flush,
1575 	srmmu_cache_flush_all,
1576 	memerr4m,
1577 	pmap_zero_page4m,
1578 	pmap_copy_page4m
1579 };
1580 
1581 void
1582 swift_hotfix(struct cpu_info *sc)
1583 {
1584 	int pcr = lda(SRMMU_PCR, ASI_SRMMU);
1585 
1586 	/* Turn off branch prediction */
1587 	pcr &= ~SWIFT_PCR_BF;
1588 	sta(SRMMU_PCR, ASI_SRMMU, pcr);
1589 }
1590 
1591 void
1592 swift_mmu_enable(void)
1593 {
1594 }
1595 
1596 
1597 /* ROSS Hypersparc */
1598 struct module_info module_hypersparc = {
1599 	CPUTYP_UNKNOWN,
1600 	VAC_WRITEBACK,
1601 	cpumatch_hypersparc,
1602 	getcacheinfo_obp,
1603 	0,
1604 	hypersparc_mmu_enable,
1605 	hypersparc_cache_enable,
1606 	hypersparc_getmid,
1607 	4096,
1608 	hypersparc_get_syncflt,
1609 	hypersparc_get_asyncflt,
1610 	srmmu_cache_flush,
1611 	srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page,
1612 	srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
1613 	srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
1614 	srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
1615 	srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
1616 	noop_pcache_flush_page,
1617 	hypersparc_pure_vcache_flush,
1618 	hypersparc_cache_flush_all,
1619 	hypersparc_memerr,
1620 	pmap_zero_page4m,
1621 	pmap_copy_page4m
1622 };
1623 
1624 void
1625 cpumatch_hypersparc(struct cpu_info *sc, struct module_info *mp, int node)
1626 {
1627 
1628 	sc->cpu_type = CPUTYP_HS_MBUS;/*XXX*/
1629 
1630 	if (node == 0) {
1631 		/* Flush I-cache */
1632 		sta(0, ASI_HICACHECLR, 0);
1633 
1634 		/* Disable `unimplemented flush' traps during boot-up */
1635 		wrasr(rdasr(HYPERSPARC_ASRNUM_ICCR) | HYPERSPARC_ICCR_FTD,
1636 			HYPERSPARC_ASRNUM_ICCR);
1637 	}
1638 }
1639 
1640 void
1641 hypersparc_mmu_enable(void)
1642 {
1643 #if 0
1644 	int pcr;
1645 
1646 	pcr = lda(SRMMU_PCR, ASI_SRMMU);
1647 	pcr |= HYPERSPARC_PCR_C;
1648 	pcr &= ~HYPERSPARC_PCR_CE;
1649 
1650 	sta(SRMMU_PCR, ASI_SRMMU, pcr);
1651 #endif
1652 }
1653 
1654 int
1655 hypersparc_getmid(void)
1656 {
1657 	u_int pcr = lda(SRMMU_PCR, ASI_SRMMU);
1658 	return ((pcr & HYPERSPARC_PCR_MID) >> 15);
1659 }
1660 
1661 
1662 /* Cypress 605 */
1663 struct module_info module_cypress = {
1664 	CPUTYP_CYPRESS,
1665 	VAC_WRITEBACK,
1666 	0,
1667 	getcacheinfo_obp,
1668 	0,
1669 	0,
1670 	cypress_cache_enable,
1671 	cypress_getmid,
1672 	4096,
1673 	cypress_get_syncflt,
1674 	cypress_get_asyncflt,
1675 	srmmu_cache_flush,
1676 	srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page,
1677 	srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
1678 	srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
1679 	srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
1680 	srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
1681 	noop_pcache_flush_page,
1682 	noop_pure_vcache_flush,
1683 	cypress_cache_flush_all,
1684 	memerr4m,
1685 	pmap_zero_page4m,
1686 	pmap_copy_page4m
1687 };
1688 
1689 
1690 /* Fujitsu Turbosparc */
1691 struct module_info module_turbosparc = {
1692 	CPUTYP_MS2,
1693 	VAC_WRITEBACK,
1694 	cpumatch_turbosparc,
1695 	getcacheinfo_obp,
1696 	turbosparc_hotfix,
1697 	0,
1698 	turbosparc_cache_enable,
1699 	0,
1700 	256,
1701 	turbosparc_get_syncflt,
1702 	no_asyncflt_regs,
1703 	srmmu_cache_flush,
1704 	srmmu_vcache_flush_page, NULL,
1705 	srmmu_vcache_flush_segment, NULL,
1706 	srmmu_vcache_flush_region, NULL,
1707 	srmmu_vcache_flush_context, NULL,
1708 	srmmu_vcache_flush_range, NULL,
1709 	noop_pcache_flush_page,
1710 	noop_pure_vcache_flush,
1711 	srmmu_cache_flush_all,
1712 	memerr4m,
1713 	pmap_zero_page4m,
1714 	pmap_copy_page4m
1715 };
1716 
1717 void
1718 cpumatch_turbosparc(struct cpu_info *sc, struct module_info *mp, int node)
1719 {
1720 	int i;
1721 
1722 	if (node == 0 || sc->master == 0)
1723 		return;
1724 
1725 	i = getpsr();
1726 	if (sc->cpu_vers == IU_VERS(i))
1727 		return;
1728 
1729 	/*
1730 	 * A cloaked Turbosparc: clear any items in cpuinfo that
1731 	 * might have been set to uS2 versions during bootstrap.
1732 	 */
1733 	sc->cpu_longname = 0;
1734 	sc->mmu_ncontext = 0;
1735 	sc->cpu_type = 0;
1736 	sc->cacheinfo.c_vactype = 0;
1737 	sc->hotfix = 0;
1738 	sc->mmu_enable = 0;
1739 	sc->cache_enable = 0;
1740 	sc->get_syncflt = 0;
1741 	sc->cache_flush = 0;
1742 	sc->sp_vcache_flush_page = 0;
1743 	sc->sp_vcache_flush_segment = 0;
1744 	sc->sp_vcache_flush_region = 0;
1745 	sc->sp_vcache_flush_context = 0;
1746 	sc->pcache_flush_page = 0;
1747 }
1748 
1749 void
1750 turbosparc_hotfix(struct cpu_info *sc)
1751 {
1752 	int pcf;
1753 
1754 	pcf = lda(SRMMU_PCFG, ASI_SRMMU);
1755 	if (pcf & TURBOSPARC_PCFG_US2) {
1756 		/* Turn off uS2 emulation bit */
1757 		pcf &= ~TURBOSPARC_PCFG_US2;
1758 		sta(SRMMU_PCFG, ASI_SRMMU, pcf);
1759 	}
1760 }
1761 #endif /* SUN4M */
1762 
1763 #if defined(SUN4M)
1764 struct module_info module_viking = {
1765 	CPUTYP_UNKNOWN,		/* set in cpumatch() */
1766 	VAC_NONE,
1767 	cpumatch_viking,
1768 	getcacheinfo_obp,
1769 	viking_hotfix,
1770 	viking_mmu_enable,
1771 	viking_cache_enable,
1772 	viking_getmid,
1773 	4096,
1774 	viking_get_syncflt,
1775 	no_asyncflt_regs,
1776 	/* supersparcs use cached DVMA, no need to flush */
1777 	noop_cache_flush,
1778 	noop_vcache_flush_page, NULL,
1779 	noop_vcache_flush_segment, NULL,
1780 	noop_vcache_flush_region, NULL,
1781 	noop_vcache_flush_context, NULL,
1782 	noop_vcache_flush_range, NULL,
1783 	viking_pcache_flush_page,
1784 	noop_pure_vcache_flush,
1785 	noop_cache_flush_all,
1786 	viking_memerr,
1787 	pmap_zero_page4m,
1788 	pmap_copy_page4m
1789 };
1790 #endif /* SUN4M */
1791 
1792 #if defined(SUN4M) || defined(SUN4D)
1793 void
1794 cpumatch_viking(struct cpu_info *sc, struct module_info *mp, int node)
1795 {
1796 
1797 	if (node == 0)
1798 		viking_hotfix(sc);
1799 }
1800 
1801 void
1802 viking_hotfix(struct cpu_info *sc)
1803 {
1804 static	int mxcc = -1;
1805 	int pcr = lda(SRMMU_PCR, ASI_SRMMU);
1806 
1807 	/* Test if we're directly on the MBus */
1808 	if ((pcr & VIKING_PCR_MB) == 0) {
1809 		sc->mxcc = 1;
1810 		sc->cacheinfo.c_flags |= CACHE_MANDATORY;
1811 		sc->zero_page = pmap_zero_page_viking_mxcc;
1812 		sc->copy_page = pmap_copy_page_viking_mxcc;
1813 #if !defined(MSIIEP)
1814 		moduleerr_handler = viking_module_error;
1815 #endif
1816 
1817 		/*
1818 		 * Ok to cache PTEs; set the flag here, so we don't
1819 		 * uncache in pmap_bootstrap().
1820 		 */
1821 		if ((pcr & VIKING_PCR_TC) == 0)
1822 			printf("[viking: PCR_TC is off]");
1823 		else
1824 			sc->cacheinfo.c_flags |= CACHE_PAGETABLES;
1825 	} else {
1826 #ifdef MULTIPROCESSOR
1827 		if (sparc_ncpus > 1 && sc->cacheinfo.ec_totalsize == 0)
1828 			sc->cache_flush = srmmu_cache_flush;
1829 #endif
1830 	}
1831 	/* Check all modules have the same MXCC configuration */
1832 	if (mxcc != -1 && sc->mxcc != mxcc)
1833 		panic("MXCC module mismatch");
1834 
1835 	mxcc = sc->mxcc;
1836 
1837 	/* XXX! */
1838 	if (sc->mxcc)
1839 		sc->cpu_type = CPUTYP_SS1_MBUS_MXCC;
1840 	else
1841 		sc->cpu_type = CPUTYP_SS1_MBUS_NOMXCC;
1842 }
1843 
1844 void
1845 viking_mmu_enable(void)
1846 {
1847 	int pcr;
1848 
1849 	pcr = lda(SRMMU_PCR, ASI_SRMMU);
1850 
1851 	if (cpuinfo.mxcc) {
1852 		if ((pcr & VIKING_PCR_TC) == 0) {
1853 			printf("[viking: turn on PCR_TC]");
1854 		}
1855 		pcr |= VIKING_PCR_TC;
1856 		CACHEINFO.c_flags |= CACHE_PAGETABLES;
1857 	} else
1858 		pcr &= ~VIKING_PCR_TC;
1859 	sta(SRMMU_PCR, ASI_SRMMU, pcr);
1860 }
1861 
1862 int
1863 viking_getmid(void)
1864 {
1865 
1866 	if (cpuinfo.mxcc) {
1867 		u_int v = ldda(MXCC_MBUSPORT, ASI_CONTROL) & 0xffffffff;
1868 		return ((v >> 24) & 0xf);
1869 	}
1870 	return (0);
1871 }
1872 
1873 #if !defined(MSIIEP)
1874 int
1875 viking_module_error(void)
1876 {
1877 	uint64_t v;
1878 	int fatal = 0;
1879 	CPU_INFO_ITERATOR n;
1880 	struct cpu_info *cpi;
1881 
1882 	/* Report on MXCC error registers in each module */
1883 	for (CPU_INFO_FOREACH(n, cpi)) {
1884 		if (cpi->ci_mxccregs == 0) {
1885 			printf("\tMXCC registers not mapped\n");
1886 			continue;
1887 		}
1888 
1889 		printf("module%d:\n", cpi->ci_cpuid);
1890 		v = *((uint64_t *)(cpi->ci_mxccregs + 0xe00));
1891 		printf("\tmxcc error 0x%llx\n", v);
1892 		v = *((uint64_t *)(cpi->ci_mxccregs + 0xb00));
1893 		printf("\tmxcc status 0x%llx\n", v);
1894 		v = *((uint64_t *)(cpi->ci_mxccregs + 0xc00));
1895 		printf("\tmxcc reset 0x%llx", v);
1896 		if (v & MXCC_MRST_WD)
1897 			printf(" (WATCHDOG RESET)"), fatal = 1;
1898 		if (v & MXCC_MRST_SI)
1899 			printf(" (SOFTWARE RESET)"), fatal = 1;
1900 		printf("\n");
1901 	}
1902 	return (fatal);
1903 }
1904 #endif /* MSIIEP */
1905 #endif /* SUN4M || SUN4D */
1906 
1907 #if defined(SUN4D)
1908 void
1909 getcacheinfo_sun4d(struct cpu_info *sc, int node)
1910 {
1911 	struct cacheinfo *ci = &sc->cacheinfo;
1912 	int i, l;
1913 
1914 	if (node == 0)
1915 		/* Bootstrapping */
1916 		return;
1917 
1918 	/*
1919 	 * The Sun4d always has TI TMS390Z55 Viking CPUs; we hard-code
1920 	 * much of the cache information here.
1921 	 */
1922 
1923 	ci->c_physical = 1;
1924 	ci->c_split = 1;
1925 
1926 	/* hwflush is used only by sun4/4c code */
1927 	ci->c_hwflush = 0;
1928 
1929 	ci->ic_nlines = 0x00000040;
1930 	ci->ic_linesize = 0x00000040;
1931 	ci->ic_l2linesize = 6;
1932 	ci->ic_associativity = 0x00000005;
1933 	ci->ic_totalsize = ci->ic_linesize * ci->ic_nlines *
1934 	    ci->ic_associativity;
1935 
1936 	ci->dc_nlines = 0x00000080;
1937 	ci->dc_linesize = 0x00000020;
1938 	ci->dc_l2linesize = 5;
1939 	ci->dc_associativity = 0x00000004;
1940 	ci->dc_totalsize = ci->dc_linesize * ci->dc_nlines *
1941 	    ci->dc_associativity;
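	/*
	 * Worked out, the fixed geometry above is the TMS390Z55's
	 * documented on-chip cache arrangement:
	 *   I$: 64 lines x 64 bytes x 5 ways = 20480 bytes (20 KB)
	 *   D$: 128 lines x 32 bytes x 4 ways = 16384 bytes (16 KB)
	 */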
1942 
1943 	ci->c_l2linesize = uimin(ci->ic_l2linesize, ci->dc_l2linesize);
1944 	ci->c_linesize = uimin(ci->ic_linesize, ci->dc_linesize);
1945 	ci->c_totalsize = uimax(ci->ic_totalsize, ci->dc_totalsize);
1946 	ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1947 
1948 	if (node_has_property(node, "ecache-nlines")) {
1949 		/* we have an L2 "e"xternal cache */
1950 		ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768);
1951 		ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0);
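		/* Derive log2 of the line size; e.g. l == 64 gives i == 6. */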
1952 		for (i = 0; (1 << i) < l && l; i++)
1953 			/* void */;
1954 		if ((1 << i) != l && l)
1955 			panic("bad ecache line size %d", l);
1956 		ci->ec_l2linesize = i;
1957 		ci->ec_associativity =
1958 			prom_getpropint(node, "ecache-associativity", 1);
1959 		ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity;
1960 	}
1961 }
1962 
1963 struct module_info module_viking_sun4d = {
1964 	CPUTYP_UNKNOWN,		/* set in cpumatch() */
1965 	VAC_NONE,
1966 	cpumatch_viking,
1967 	getcacheinfo_sun4d,
1968 	viking_hotfix,
1969 	viking_mmu_enable,
1970 	viking_cache_enable,
1971 	viking_getmid,
1972 	4096,
1973 	viking_get_syncflt,
1974 	no_asyncflt_regs,
1975 	/* SuperSPARCs use cached DVMA, no need to flush */
1976 	noop_cache_flush,
1977 	noop_vcache_flush_page, NULL,
1978 	noop_vcache_flush_segment, NULL,
1979 	noop_vcache_flush_region, NULL,
1980 	noop_vcache_flush_context, NULL,
1981 	noop_vcache_flush_range, NULL,
1982 	viking_pcache_flush_page,
1983 	noop_pure_vcache_flush,
1984 	noop_cache_flush_all,
1985 	viking_memerr,
1986 	pmap_zero_page4m,
1987 	pmap_copy_page4m
1988 };
1989 #endif /* SUN4D */
1990 
1991 #define	ANY	-1	/* match any version */
1992 
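/*
 * CPU configuration table: each entry matches an <architecture,
 * IU implementation/version, MMU implementation/version> tuple, with
 * ANY acting as a wildcard, and supplies a human-readable name plus
 * the module_info carrying that CPU type's routines.  getcpuinfo()
 * below picks the first matching entry.
 */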
1993 struct cpu_conf {
1994 	int	arch;
1995 	int	cpu_impl;
1996 	int	cpu_vers;
1997 	int	mmu_impl;
1998 	int	mmu_vers;
1999 	const char	*name;
2000 	struct	module_info *minfo;
2001 } cpu_conf[] = {
2002 #if defined(SUN4)
2003 	{ CPU_SUN4, 0, 0, ANY, ANY, "MB86900/1A or L64801", &module_sun4 },
2004 	{ CPU_SUN4, 1, 0, ANY, ANY, "L64811", &module_sun4 },
2005 	{ CPU_SUN4, 1, 1, ANY, ANY, "CY7C601", &module_sun4 },
2006 #endif
2007 
2008 #if defined(SUN4C)
2009 	{ CPU_SUN4C, 0, 0, ANY, ANY, "MB86900/1A or L64801", &module_sun4c },
2010 	{ CPU_SUN4C, 1, 0, ANY, ANY, "L64811", &module_sun4c },
2011 	{ CPU_SUN4C, 1, 1, ANY, ANY, "CY7C601", &module_sun4c },
2012 	{ CPU_SUN4C, 9, 0, ANY, ANY, "W8601/8701 or MB86903", &module_sun4c },
2013 #endif
2014 
2015 #if defined(SUN4M)
2016 	{ CPU_SUN4M, 0, 4, 0, 4, "MB86904", &module_swift },
2017 	{ CPU_SUN4M, 0, 5, 0, 5, "MB86907", &module_turbosparc },
2018 	{ CPU_SUN4M, 1, 1, 1, 0, "CY7C601/604", &module_cypress },
2019 	{ CPU_SUN4M, 1, 1, 1, 0xb, "CY7C601/605 (v.b)", &module_cypress },
2020 	{ CPU_SUN4M, 1, 1, 1, 0xc, "CY7C601/605 (v.c)", &module_cypress },
2021 	{ CPU_SUN4M, 1, 1, 1, 0xf, "CY7C601/605 (v.f)", &module_cypress },
2022 	{ CPU_SUN4M, 1, 3, 1, ANY, "CY7C611", &module_cypress },
2023 	{ CPU_SUN4M, 1, 0xe, 1, 7, "RT620/625", &module_hypersparc },
2024 	{ CPU_SUN4M, 1, 0xf, 1, 7, "RT620/625", &module_hypersparc },
2025 	{ CPU_SUN4M, 4, 0, 0, 1, "SuperSPARC v3", &module_viking },
2026 	{ CPU_SUN4M, 4, 0, 0, 2, "SuperSPARC v4", &module_viking },
2027 	{ CPU_SUN4M, 4, 0, 0, 3, "SuperSPARC v5", &module_viking },
2028 	{ CPU_SUN4M, 4, 0, 0, 8, "SuperSPARC II v1", &module_viking },
2029 	{ CPU_SUN4M, 4, 0, 0, 10, "SuperSPARC II v2", &module_viking },
2030 	{ CPU_SUN4M, 4, 0, 0, 12, "SuperSPARC II v3", &module_viking },
2031 	{ CPU_SUN4M, 4, 0, 0, ANY, "TMS390Z50 v0 or TMS390Z55", &module_viking },
2032 	{ CPU_SUN4M, 4, 1, 0, ANY, "TMS390Z50 v1", &module_viking },
2033 	{ CPU_SUN4M, 4, 1, 4, ANY, "TMS390S10", &module_ms1 },
2034 	{ CPU_SUN4M, 4, 2, 0, ANY, "TI_MS2", &module_ms2 },
2035 	{ CPU_SUN4M, 4, 3, ANY, ANY, "TI_4_3", &module_viking },
2036 	{ CPU_SUN4M, 4, 4, ANY, ANY, "TI_4_4", &module_viking },
2037 #endif
2038 
2039 #if defined(SUN4D)
2040 	{ CPU_SUN4D, 4, 0, 0, ANY, "TMS390Z50 v0 or TMS390Z55",
2041 	  &module_viking_sun4d },
2042 #endif
2043 
2044 	{ ANY, ANY, ANY, ANY, ANY, "Unknown", &module_unknown }
2045 };
2046 
2047 void
2048 getcpuinfo(struct cpu_info *sc, int node)
2049 {
2050 	struct cpu_conf *mp;
2051 	int i;
2052 	int cpu_impl, cpu_vers;
2053 	int mmu_impl, mmu_vers;
2054 
2055 	/*
2056 	 * Set up main criteria for selection from the CPU configuration
2057 	 * table: the CPU implementation/version fields from the PSR
2058 	 * register, and -- on sun4m machines -- the MMU
2059 	 * implementation/version from the SCR register.
2060 	 */
2061 	if (sc->master) {
2062 		i = getpsr();
2063 		if (node == 0 ||
2064 		    (cpu_impl =
2065 		     prom_getpropint(node, "psr-implementation", -1)) == -1)
2066 			cpu_impl = IU_IMPL(i);
2067 
2068 		if (node == 0 ||
2069 		    (cpu_vers = prom_getpropint(node, "psr-version", -1)) == -1)
2070 			cpu_vers = IU_VERS(i);
2071 
2072 		if (CPU_HAS_SRMMU) {
2073 			i = lda(SRMMU_PCR, ASI_SRMMU);
2074 			if (node == 0 ||
2075 			    (mmu_impl =
2076 			     prom_getpropint(node, "implementation", -1)) == -1)
2077 				mmu_impl = SRMMU_IMPL(i);
2078 
2079 			if (node == 0 ||
2080 			    (mmu_vers = prom_getpropint(node, "version", -1)) == -1)
2081 				mmu_vers = SRMMU_VERS(i);
2082 		} else {
2083 			mmu_impl = ANY;
2084 			mmu_vers = ANY;
2085 		}
2086 	} else {
2087 		/*
2088 		 * Get CPU version/implementation from ROM. If not
2089 		 * available, assume same as boot CPU.
2090 		 */
2091 		cpu_impl = prom_getpropint(node, "psr-implementation",
2092 					   cpuinfo.cpu_impl);
2093 		cpu_vers = prom_getpropint(node, "psr-version",
2094 					   cpuinfo.cpu_vers);
2095 
2096 		/* Get MMU version/implementation from ROM always */
2097 		mmu_impl = prom_getpropint(node, "implementation", -1);
2098 		mmu_vers = prom_getpropint(node, "version", -1);
2099 	}
2100 
2101 	if (node != 0) {
2102 		char *cpu_name;
2103 		char namebuf[64];
2104 
2105 		cpu_name = prom_getpropstringA(node, "name", namebuf,
2106 					       sizeof namebuf);
2107 		if (cpu_name && cpu_name[0])
2108 			sc->cpu_longname = kmem_strdupsize(cpu_name, NULL,
2109 							   KM_SLEEP);
2110 	}
2111 
2112 	for (mp = cpu_conf; ; mp++) {
2113 		if (mp->arch != cputyp && mp->arch != ANY)
2114 			continue;
2115 
2116 #define MATCH(x)	(mp->x == x || mp->x == ANY)
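		/*
		 * For example, MATCH(cpu_impl) expands to
		 * (mp->cpu_impl == cpu_impl || mp->cpu_impl == ANY):
		 * each field must either match exactly or be a wildcard.
		 */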
2117 		if (!MATCH(cpu_impl) ||
2118 		    !MATCH(cpu_vers) ||
2119 		    !MATCH(mmu_impl) ||
2120 		    !MATCH(mmu_vers))
2121 			continue;
2122 #undef MATCH
2123 
2124 		/*
2125 		 * Got CPU type.
2126 		 */
2127 		sc->cpu_impl = cpu_impl;
2128 		sc->cpu_vers = cpu_vers;
2129 		sc->mmu_impl = mmu_impl;
2130 		sc->mmu_vers = mmu_vers;
2131 
2132 		if (mp->minfo->cpu_match) {
2133 			/* Additional fixups */
2134 			mp->minfo->cpu_match(sc, mp->minfo, node);
2135 		}
2136 		if (sc->cpu_longname == 0)
2137 			sc->cpu_longname = mp->name;
2138 
2139 		if (sc->mmu_ncontext == 0)
2140 			sc->mmu_ncontext = mp->minfo->ncontext;
2141 
2142 		if (sc->cpu_type == 0)
2143 			sc->cpu_type = mp->minfo->cpu_type;
2144 
2145 		if (sc->cacheinfo.c_vactype == VAC_UNKNOWN)
2146 			sc->cacheinfo.c_vactype = mp->minfo->vactype;
2147 
2148 		if (sc->master && mp->minfo->getmid != NULL)
2149 			bootmid = mp->minfo->getmid();
2150 
2151 		mp->minfo->getcacheinfo(sc, node);
2152 
2153 		if (node && sc->hz == 0 && !CPU_ISSUN4/*XXX*/) {
2154 			sc->hz = prom_getpropint(node, "clock-frequency", 0);
2155 			if (sc->hz == 0) {
2156 				/*
2157 				 * Try to find it in the OpenPROM root...
2158 				 */
2159 				sc->hz = prom_getpropint(findroot(),
2160 						    "clock-frequency", 0);
2161 			}
2162 		}
2163 
2164 		/*
2165 		 * Copy CPU/MMU/Cache specific routines into cpu_info.
2166 		 */
2167 #define MPCOPY(x)	if (sc->x == 0) sc->x = mp->minfo->x;
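		/*
		 * For example, MPCOPY(hotfix) expands to
		 * "if (sc->hotfix == 0) sc->hotfix = mp->minfo->hotfix;",
		 * i.e. the module defaults only fill in routines that
		 * cpu_match() did not already set.
		 */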
2168 		MPCOPY(hotfix);
2169 		MPCOPY(mmu_enable);
2170 		MPCOPY(cache_enable);
2171 		MPCOPY(get_syncflt);
2172 		MPCOPY(get_asyncflt);
2173 		MPCOPY(cache_flush);
2174 		MPCOPY(sp_vcache_flush_page);
2175 		MPCOPY(sp_vcache_flush_segment);
2176 		MPCOPY(sp_vcache_flush_region);
2177 		MPCOPY(sp_vcache_flush_context);
2178 		MPCOPY(sp_vcache_flush_range);
2179 		MPCOPY(ft_vcache_flush_page);
2180 		MPCOPY(ft_vcache_flush_segment);
2181 		MPCOPY(ft_vcache_flush_region);
2182 		MPCOPY(ft_vcache_flush_context);
2183 		MPCOPY(ft_vcache_flush_range);
2184 		MPCOPY(pcache_flush_page);
2185 		MPCOPY(pure_vcache_flush);
2186 		MPCOPY(cache_flush_all);
2187 		MPCOPY(memerr);
2188 		MPCOPY(zero_page);
2189 		MPCOPY(copy_page);
2190 #undef MPCOPY
2191 		/*
2192 		 * Use the single-processor cache flush functions until
2193 		 * all CPUs are initialized.
2194 		 */
2195 		sc->vcache_flush_page = sc->sp_vcache_flush_page;
2196 		sc->vcache_flush_segment = sc->sp_vcache_flush_segment;
2197 		sc->vcache_flush_region = sc->sp_vcache_flush_region;
2198 		sc->vcache_flush_context = sc->sp_vcache_flush_context;
2199 		(*sc->cache_flush_all)();
2200 		return;
2201 	}
2202 	panic("Out of CPUs");
2203 }
2204 
2205 /*
2206  * The following tables convert <IU impl, IU version, FPU version> triples
2207  * into names for the CPU and FPU chips.  In most cases we do not need to
2208  * inspect the FPU version to name the IU chip, but there is one exception
2209  * (Tsunami), which is why both tables are keyed on the same triple.
2210  *
2211  * The table contents (and much of the structure here) are from Guy Harris.
2212  *
2213  */
2214 struct info {
2215 	int	valid;
2216 	int	iu_impl;
2217 	int	iu_vers;
2218 	int	fpu_vers;
2219 	const char	*name;
2220 };
2221 
2222 /* XXX trim this table on a per-ARCH basis */
2223 /* NB: table order matters here; specific numbers must appear before ANY. */
2224 static struct info fpu_types[] = {
2225 	/*
2226 	 * Vendor 0, IU Fujitsu0.
2227 	 */
2228 	{ 1, 0x0, ANY, 0, "MB86910 or WTL1164/5" },
2229 	{ 1, 0x0, ANY, 1, "MB86911 or WTL1164/5" },
2230 	{ 1, 0x0, ANY, 2, "L64802 or ACT8847" },
2231 	{ 1, 0x0, ANY, 3, "WTL3170/2" },
2232 	{ 1, 0x0, 4,   4, "on-chip" },		/* Swift */
2233 	{ 1, 0x0, 5,   5, "on-chip" },		/* TurboSparc */
2234 	{ 1, 0x0, ANY, 4, "L64804" },
2235 
2236 	/*
2237 	 * Vendor 1, IU ROSS0/1 or Pinnacle.
2238 	 */
2239 	{ 1, 0x1, 0xf, 0, "on-chip" },		/* Pinnacle */
2240 	{ 1, 0x1, 0xe, 0, "on-chip" },		/* Hypersparc RT 625/626 */
2241 	{ 1, 0x1, ANY, 0, "L64812 or ACT8847" },
2242 	{ 1, 0x1, ANY, 1, "L64814" },
2243 	{ 1, 0x1, ANY, 2, "TMS390C602A" },
2244 	{ 1, 0x1, ANY, 3, "RT602 or WTL3171" },
2245 
2246 	/*
2247 	 * Vendor 2, IU BIT0.
2248 	 */
2249 	{ 1, 0x2, ANY, 0, "B5010 or B5110/20 or B5210" },
2250 
2251 	/*
2252 	 * Vendor 4, Texas Instruments.
2253 	 */
2254 	{ 1, 0x4, ANY, 0, "on-chip" },		/* Viking */
2255 	{ 1, 0x4, ANY, 4, "on-chip" },		/* Tsunami */
2256 
2257 	/*
2258 	 * Vendor 5, IU Matsushita0.
2259 	 */
2260 	{ 1, 0x5, ANY, 0, "on-chip" },
2261 
2262 	/*
2263 	 * Vendor 9, Weitek.
2264 	 */
2265 	{ 1, 0x9, ANY, 3, "on-chip" },
2266 
2267 	{ 0 }
2268 };
2269 
2270 static const char *
2271 fsrtoname(int impl, int vers, int fver)
2272 {
2273 	struct info *p;
2274 
2275 	for (p = fpu_types; p->valid; p++) {
2276 		if (p->iu_impl == impl &&
2277 		    (p->iu_vers == vers || p->iu_vers == ANY) &&
2278 		    (p->fpu_vers == fver))
2279 			return (p->name);
2280 	}
2281 	return (NULL);
2282 }
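
/*
 * For example, fsrtoname(0x4, 0, 0) stops at the Viking entry above
 * (impl 0x4, any IU version, FPU version 0) and returns "on-chip";
 * a triple with no table entry falls through and yields NULL.
 */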
2283 
2284 #ifdef DDB
2285 
2286 #include <ddb/db_output.h>
2287 #include <machine/db_machdep.h>
2288 
2289 #include "ioconf.h"
2290 
2291 /*
2292  * Dump CPU information from ddb.
2293  */
2294 void
2295 cpu_debug_dump(void)
2296 {
2297 	struct cpu_info *ci;
2298 	CPU_INFO_ITERATOR cii;
2299 
2300 	db_printf("%-4s %-10s %-8s %-10s %-10s %-10s %-10s\n",
2301 	    "CPU#", "CPUINFO", "FLAGS", "CURLWP", "CURPROC", "FPLWP", "CPCB");
2302 	for (CPU_INFO_FOREACH(cii, ci)) {
2303 		db_printf("%-4d %-10p %-8x %-10p %-10p %-10p %-10p\n",
2304 		    ci->ci_cpuid,
2305 		    ci,
2306 		    ci->flags,
2307 		    ci->ci_curlwp,
2308 		    ci->ci_curlwp == NULL ? NULL : ci->ci_curlwp->l_proc,
2309 		    ci->fplwp,
2310 		    ci->curpcb);
2311 	}
2312 }
2313 
2314 #if defined(MULTIPROCESSOR)
2315 /*
2316  * Dump CPU xcall from ddb.
2317  */
2318 void
2319 cpu_xcall_dump(void)
2320 {
2321 	struct cpu_info *ci;
2322 	CPU_INFO_ITERATOR cii;
2323 
2324 	db_printf("%-4s %-10s %-10s %-10s %-10s %-10s "
2325 		    "%-4s %-4s %-4s\n",
2326 	          "CPU#", "FUNC", "TRAP", "ARG0", "ARG1", "ARG2",
2327 	            "TAG", "RECV", "COMPL");
2328 	for (CPU_INFO_FOREACH(cii, ci)) {
2329 		db_printf("%-4d %-10p %-10p 0x%-8x 0x%-8x 0x%-8x "
2330 			    "%-4d %-4d %-4d\n",
2331 		    ci->ci_cpuid,
2332 		    ci->msg.u.xpmsg_func.func,
2333 		    ci->msg.u.xpmsg_func.trap,
2334 		    ci->msg.u.xpmsg_func.arg0,
2335 		    ci->msg.u.xpmsg_func.arg1,
2336 		    ci->msg.u.xpmsg_func.arg2,
2337 		    ci->msg.tag,
2338 		    ci->msg.received,
2339 		    ci->msg.complete);
2340 	}
2341 }
2342 #endif
2343 
2344 #endif
2345