1 /*	$NetBSD: cpu.c,v 1.135 2018/06/06 01:49:08 maya Exp $ */
2 
3 /*
4  * Copyright (c) 1996
5  *	The President and Fellows of Harvard College. All rights reserved.
6  * Copyright (c) 1992, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  *
9  * This software was developed by the Computer Systems Engineering group
10  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11  * contributed to Berkeley.
12  *
13  * All advertising materials mentioning features or use of this software
14  * must display the following acknowledgement:
15  *	This product includes software developed by Harvard University.
16  *	This product includes software developed by the University of
17  *	California, Lawrence Berkeley Laboratory.
18  *
19  * Redistribution and use in source and binary forms, with or without
20  * modification, are permitted provided that the following conditions
21  * are met:
22  *
23  * 1. Redistributions of source code must retain the above copyright
24  *    notice, this list of conditions and the following disclaimer.
25  * 2. Redistributions in binary form must reproduce the above copyright
26  *    notice, this list of conditions and the following disclaimer in the
27  *    documentation and/or other materials provided with the distribution.
28  * 3. All advertising materials mentioning features or use of this software
29  *    must display the following acknowledgement:
30  *	This product includes software developed by Aaron Brown and
31  *	Harvard University.
32  *	This product includes software developed by the University of
33  *	California, Berkeley and its contributors.
34  * 4. Neither the name of the University nor the names of its contributors
35  *    may be used to endorse or promote products derived from this software
36  *    without specific prior written permission.
37  *
38  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48  * SUCH DAMAGE.
49  *
50  *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
51  *
52  */
53 
54 #include <sys/cdefs.h>
55 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.135 2018/06/06 01:49:08 maya Exp $");
56 
57 #include "opt_multiprocessor.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/device.h>
62 #include <sys/kernel.h>
63 #include <sys/reboot.h>
64 #include <sys/cpu.h>
65 #include <sys/sysctl.h>
66 #include <sys/kmem.h>
67 
68 #include <uvm/uvm.h>
69 
70 #include <machine/autoconf.h>
71 #include <machine/cpu.h>
72 #include <machine/reg.h>
73 #include <machine/trap.h>
74 #include <machine/pmap.h>
75 #include <machine/sparc64.h>
76 #include <machine/openfirm.h>
77 #include <machine/hypervisor.h>
78 #include <machine/mdesc.h>
79 
80 #include <sparc64/sparc64/cache.h>
81 
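/*
 * Geometry of the per-CPU sun4v mondo queues configured in cpu_attach()
 * below: number of entries per queue and the size, in bytes, of each
 * queue entry.
 */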
82 #define SUN4V_MONDO_QUEUE_SIZE	32
83 #define SUN4V_QUEUE_ENTRY_SIZE	64
84 
85 int ecache_min_line_size;
86 
87 /* Linked list of all CPUs in system. */
88 #if defined(MULTIPROCESSOR)
89 int sparc_ncpus = 0;
90 #endif
91 struct cpu_info *cpus = NULL;
92 
93 volatile sparc64_cpuset_t cpus_active;	/* set of active cpus */
94 struct cpu_bootargs *cpu_args;	/* allocated very early in pmap_bootstrap. */
95 struct pool_cache *fpstate_cache;
96 
97 static struct cpu_info *alloc_cpuinfo(u_int);
98 
99 /* The following are used externally (sysctl_hw). */
100 char	machine[] = MACHINE;		/* from <machine/param.h> */
101 char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
102 
103 /* These are used in locore.s, and are maximums */
104 int	dcache_line_size;
105 int	dcache_size;
106 int	icache_line_size;
107 int	icache_size;
108 
109 #ifdef MULTIPROCESSOR
110 static const char *ipi_evcnt_names[IPI_EVCNT_NUM] = IPI_EVCNT_NAMES;
111 #endif
112 
113 static void cpu_reset_fpustate(void);
114 
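/*
 * Set by the boot CPU in cpu_boot_secondary_processors() to tell a freshly
 * hatched CPU (spinning in cpu_hatch()) that it may reset its %tick/%stick
 * counters, giving the counters a roughly common starting point.
 */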
115 volatile int sync_tick = 0;
116 
117 /* The CPU configuration driver. */
118 void cpu_attach(device_t, device_t, void *);
119 int cpu_match(device_t, cfdata_t, void *);
120 
121 CFATTACH_DECL_NEW(cpu, 0, cpu_match, cpu_attach, NULL, NULL);
122 
123 static int
124 cpuid_from_node(u_int cpu_node)
125 {
126 	/*
127 	 * Determine the cpuid by examining the node's properties
128 	 * in the following order:
129 	 *  upa-portid
130 	 *  portid
131 	 *  cpuid
132 	 *  reg (sun4v only)
133 	 */
134 
135 	int id;
136 
137 	id = prom_getpropint(cpu_node, "upa-portid", -1);
138 	if (id == -1)
139 		id = prom_getpropint(cpu_node, "portid", -1);
140 	if (id == -1)
141 		id = prom_getpropint(cpu_node, "cpuid", -1);
142 	if (CPU_ISSUN4V) {
143 		int reg[4];
144 		int *regp = reg;
145 		int len = 4;
146 		int rc = prom_getprop(cpu_node, "reg", sizeof(int),
147 		    &len, &regp);
148 		if (rc != 0)
149 			panic("No reg property found");
150 		/* The cpuid is in the low-order bits of reg[0] (sun4v hypervisor arch). */
151 		id = reg[0] & 0x0fffffff;
152 	}
153 	if (id == -1)
154 		panic("failed to determine cpuid");
155 
156 	return id;
157 }
158 
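/*
 * Look up a cache property in the sun4v machine description: walk the MD
 * "cache" nodes, match on cache type (NULL matches any type) and level,
 * and return the requested property value, or 0 if nothing matches.
 */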
159 static int
160 cpu_cache_info_sun4v(const char *type, int level, const char *prop)
161 {
162 	int idx = 0;
163 	uint64_t val = 0;
164 	idx = mdesc_find_node_by_idx(idx, "cache");
165 	while (idx != -1 && val == 0) {
166 		const char *name = mdesc_name_by_idx(idx);
167 		if (strcmp("cache", name) == 0) {
168 			const char *p;
169 			size_t len = 0;
170 			p = mdesc_get_prop_data(idx, "type", &len);
171 			if (p == NULL)
172 				panic("No type found");
173 			if (len == 0)
174 				panic("Zero-length type property");
175 			if (type == NULL || strcmp(p, type) == 0) {
176 				uint64_t l;
177 				l = mdesc_get_prop_val(idx, "level");
178 				if (l == level)
179 					val = mdesc_get_prop_val(idx, prop);
180 			}
181 		}
182 		if (val == 0)
183 			idx = mdesc_next_node(idx);
184 	}
185 	return val;
186 }
187 
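/*
 * Cache geometry helpers.  On sun4v the values come from the machine
 * description; otherwise they are read from the given PROM node, with
 * defaults for the nlines/associativity properties so a total size can
 * still be computed when the PROM omits it.
 */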
188 static int
189 cpu_icache_size(int node)
190 {
191 	if (CPU_ISSUN4V)
192 		return cpu_cache_info_sun4v("instn", 1, "size");
193 	else
194 		return prom_getpropint(node, "icache-size", 0);
195 }
196 
197 static int
198 cpu_icache_line_size(int node)
199 {
200 	if (CPU_ISSUN4V)
201 		return cpu_cache_info_sun4v("instn", 1, "line-size");
202 	else
203 		return prom_getpropint(node, "icache-line-size", 0);
204 }
205 
206 static int
207 cpu_icache_nlines(int node)
208 {
209 	if (CPU_ISSUN4V)
210 		return 0;
211 	else
212 		return prom_getpropint(node, "icache-nlines", 64);
213 }
214 
215 static int
216 cpu_icache_associativity(int node)
217 {
218 	if (CPU_ISSUN4V) {
219 		int val;
220 		val = cpu_cache_info_sun4v("instn", 1, "associativity");
221 		if (val == 0)
222 			val = 1;
223 		return val;
224 	} else
225 		return prom_getpropint(node, "icache-associativity", 1);
226 }
227 
228 static int
229 cpu_dcache_size(int node)
230 {
231 	if (CPU_ISSUN4V)
232 		return cpu_cache_info_sun4v("data", 1, "size");
233 	else
234 		return prom_getpropint(node, "dcache-size", 0);
235 }
236 
237 static int
238 cpu_dcache_line_size(int node)
239 {
240 	if (CPU_ISSUN4V)
241 		return cpu_cache_info_sun4v("data", 1, "line-size");
242 	else
243 		return prom_getpropint(node, "dcache-line-size", 0);
244 }
245 
246 static int
247 cpu_dcache_nlines(int node)
248 {
249 	if (CPU_ISSUN4V)
250 		return 0;
251 	else
252 		return prom_getpropint(node, "dcache-nlines", 128);
253 }
254 
255 static int
256 cpu_dcache_associativity(int node)
257 {
258 	if (CPU_ISSUN4V) {
259 		int val;
260 		val = cpu_cache_info_sun4v("data", 1, "associativity");
261 		if (val == 0)
262 			val = 1;
263 		return val;
264 	} else
265 		return prom_getpropint(node, "dcache-associativity", 1);
266 }
267 
268 int
269 cpu_ecache_size(int node)
270 {
271 	if (CPU_ISSUN4V)
272 		return cpu_cache_info_sun4v(NULL, 2, "size");
273 	else
274 		return prom_getpropint(node, "ecache-size", 0);
275 }
276 
277 static int
278 cpu_ecache_line_size(int node)
279 {
280 	if (CPU_ISSUN4V)
281 		return cpu_cache_info_sun4v(NULL, 2, "line-size");
282 	else
283 		return prom_getpropint(node, "ecache-line-size", 0);
284 }
285 
286 static int
287 cpu_ecache_nlines(int node)
288 {
289 	if (CPU_ISSUN4V)
290 		return 0;
291 	else
292 		return prom_getpropint(node, "ecache-nlines", 32768);
293 }
294 
295 int
296 cpu_ecache_associativity(int node)
297 {
298 	if (CPU_ISSUN4V) {
299 		int val;
300 		val = cpu_cache_info_sun4v(NULL, 2, "associativity");
301 		if (val == 0)
302 			val = 1;
303 		return val;
304 	} else
305 		return prom_getpropint(node, "ecache-associativity", 1);
306 }
307 
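/*
 * Look up the cpu_info for a PROM node, allocating it if necessary.
 * Each CPU gets an aligned 8-page block (interrupt stack, idle stack and
 * the cpu_info itself), laid out the same way as the boot CPU's block
 * set up in pmap.c.
 */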
308 struct cpu_info *
309 alloc_cpuinfo(u_int cpu_node)
310 {
311 	paddr_t pa0, pa;
312 	vaddr_t va, va0;
313 	vsize_t sz = 8 * PAGE_SIZE;
314 	int cpuid;
315 	struct cpu_info *cpi, *ci;
316 	extern paddr_t cpu0paddr;
317 
318 	/*
319 	 * Check for matching cpuid in the cpus list.
320 	 */
321 	cpuid = cpuid_from_node(cpu_node);
322 
323 	for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
324 		if (cpi->ci_cpuid == cpuid)
325 			return cpi;
326 
327 	/* Allocate the aligned VA and determine the size. */
328 	va = uvm_km_alloc(kernel_map, sz, 8 * PAGE_SIZE, UVM_KMF_VAONLY);
329 	if (!va)
330 		panic("alloc_cpuinfo: no virtual space");
331 	va0 = va;
332 
333 	pa0 = cpu0paddr;
334 	cpu0paddr += sz;
335 
336 	for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
337 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
338 
339 	pmap_update(pmap_kernel());
340 
341 	cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);
342 
343 	memset((void *)va0, 0, sz);
344 
345 	/*
346 	 * Initialize cpuinfo structure.
347 	 *
348 	 * Arrange pcb, idle stack and interrupt stack in the same
349 	 * way as is done for the boot CPU in pmap.c.
350 	 */
351 	cpi->ci_next = NULL;
352 	cpi->ci_curlwp = NULL;
353 	cpi->ci_cpuid = cpuid;
354 	cpi->ci_fplwp = NULL;
355 	cpi->ci_eintstack = NULL;
356 	cpi->ci_spinup = NULL;
357 	cpi->ci_paddr = pa0;
358 	cpi->ci_self = cpi;
359 	if (CPU_ISSUN4V)
360 		cpi->ci_mmufsa = pa0;
361 	cpi->ci_node = cpu_node;
362 	cpi->ci_idepth = -1;
363 	memset(cpi->ci_intrpending, -1, sizeof(cpi->ci_intrpending));
364 
365 	/*
366 	 * Finally, append the new cpu_info to the end of the cpus list.
367 	 */
368 	for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
369 		;
370 #ifdef MULTIPROCESSOR
371 	ci->ci_next = cpi;
372 #endif
373 	return (cpi);
374 }
375 
376 int
377 cpu_match(device_t parent, cfdata_t cf, void *aux)
378 {
379 	struct mainbus_attach_args *ma = aux;
380 
381 	if (strcmp(cf->cf_name, ma->ma_name) != 0)
382 		return 0;
383 
384 	/*
385 	 * If we are going to only attach a single cpu, make sure
386 	 * to pick the one we are running on right now.
387 	 */
388 	if (cpuid_from_node(ma->ma_node) != cpu_myid()) {
389 #ifdef MULTIPROCESSOR
390 		if (boothowto & RB_MD1)
391 #endif
392 			return 0;
393 	}
394 
395 	return 1;
396 }
397 
398 static void
399 cpu_reset_fpustate(void)
400 {
401 	struct fpstate64 *fpstate;
402 	struct fpstate64 fps[2];
403 
404 	/* This needs to be 64-byte aligned */
405 	fpstate = ALIGNFPSTATE(&fps[1]);
406 
407 	/*
408 	 * Get the FSR and clear any exceptions.  If we do not unload
409 	 * the queue here and it is left over from a previous crash, we
410 	 * will panic in the first loadfpstate(), due to a sequence error,
411 	 * so we need to dump the whole state anyway.
412 	 */
413 	fpstate->fs_fsr = 7 << FSR_VER_SHIFT;	/* 7 is reserved for "none" */
414 	savefpstate(fpstate);
415 }
416 
417 /* Set up the hw.cpuN.* sysctl nodes for this CPU. */
418 static void
419 cpu_setup_sysctl(struct cpu_info *ci, device_t dev)
420 {
421 	const struct sysctlnode *cpunode = NULL;
422 
423 	sysctl_createv(NULL, 0, NULL, &cpunode,
424 		       CTLFLAG_PERMANENT,
425 		       CTLTYPE_NODE, device_xname(dev), NULL,
426 		       NULL, 0, NULL, 0,
427 		       CTL_HW,
428 		       CTL_CREATE, CTL_EOL);
429 
430 	if (cpunode == NULL)
431 		return;
432 
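/*
 * Helper macros: create a read-only string/int/quad leaf node under the
 * hw.cpuN node created above.
 */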
433 #define SETUPS(name, member)					\
434 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
435 		       CTLFLAG_PERMANENT,			\
436 		       CTLTYPE_STRING, name, NULL,		\
437 		       NULL, 0, member, 0,			\
438 		       CTL_CREATE, CTL_EOL);
439 
440 	SETUPS("name", __UNCONST(ci->ci_name))
441 #undef SETUPS
442 
443 #define SETUPI(name, member)					\
444 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
445 		       CTLFLAG_PERMANENT,			\
446 		       CTLTYPE_INT, name, NULL,			\
447 		       NULL, 0, member, 0,			\
448 		       CTL_CREATE, CTL_EOL);
449 
450 	SETUPI("id", &ci->ci_cpuid);
451 #undef SETUPI
452 
453 #define SETUPQ(name, member)					\
454 	sysctl_createv(NULL, 0, &cpunode, NULL,			\
455 		       CTLFLAG_PERMANENT,			\
456 		       CTLTYPE_QUAD, name, NULL,			\
457 		       NULL, 0, member, 0,			\
458 		       CTL_CREATE, CTL_EOL);
459 
460 	SETUPQ("clock_frequency", &ci->ci_cpu_clockrate[0])
461 	SETUPQ("ver", &ci->ci_ver)
462 #undef SETUPQ
463 
464 	sysctl_createv(NULL, 0, &cpunode, NULL,
465 		       CTLFLAG_PERMANENT,
466 		       CTLTYPE_STRUCT, "cacheinfo", NULL,
467 		       NULL, 0, &ci->ci_cacheinfo, sizeof(ci->ci_cacheinfo),
468 		       CTL_CREATE, CTL_EOL);
469 
470 }
471 
472 /*
473  * Attach the CPU.
474  * Discover interesting goop about the virtual address cache
475  * (slightly funny place to do it, but this is where it is to be found).
476  */
477 void
478 cpu_attach(device_t parent, device_t dev, void *aux)
479 {
480 	int node;
481 	uint64_t clk, sclk = 0;
482 	struct mainbus_attach_args *ma = aux;
483 	struct cpu_info *ci;
484 	const char *sep;
485 	register int i, l;
486 	int bigcache, cachesize;
487 	char buf[100];
488 	int 	totalsize = 0;
489 	int 	linesize, dcachesize, icachesize;
490 
491 	/* tell them what we have */
492 	node = ma->ma_node;
493 
494 	/*
495 	 * Allocate cpu_info structure if needed.
496 	 */
497 	ci = alloc_cpuinfo((u_int)node);
498 
499 	/*
500 	 * Only do this on the boot CPU.  The other CPUs call
501 	 * cpu_reset_fpustate() from cpu_hatch() before they
502 	 * enter the idle loop.
503 	 * For the other CPUs, we also need to call mi_cpu_attach()
504 	 * and finish setting up cpcb.
505 	 */
506 	if (ci->ci_flags & CPUF_PRIMARY) {
507 		fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
508 					SPARC64_BLOCK_SIZE, 0, 0, "fpstate",
509 					NULL, IPL_NONE, NULL, NULL, NULL);
510 		cpu_reset_fpustate();
511 	}
512 #ifdef MULTIPROCESSOR
513 	else {
514 		mi_cpu_attach(ci);
515 		ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
516 	}
517 	for (i = 0; i < IPI_EVCNT_NUM; ++i)
518 		evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
519 				     NULL, device_xname(dev), ipi_evcnt_names[i]);
520 #endif
521 	evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
522 			     device_xname(dev), "timer");
523 	mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);
524 
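	/*
	 * Determine the processor clock rate, preferring the 64-bit
	 * "clock-frequency64" property, then the 32-bit one on this node,
	 * then the root node as a last resort.
	 */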
525 	clk = prom_getpropuint64(node, "clock-frequency64", 0);
526 	if (clk == 0)
527 		clk = prom_getpropint(node, "clock-frequency", 0);
528 	if (clk == 0) {
529 		/*
530 		 * Try to find it in the OpenPROM root...
531 		 */
532 		clk = prom_getpropint(findroot(), "clock-frequency", 0);
533 	}
534 	if (clk) {
535 		/* Tell OS what frequency we run on */
536 		ci->ci_cpu_clockrate[0] = clk;
537 		ci->ci_cpu_clockrate[1] = clk / 1000000;
538 	}
539 
540 	sclk = prom_getpropint(findroot(), "stick-frequency", 0);
541 
542 	ci->ci_system_clockrate[0] = sclk;
543 	ci->ci_system_clockrate[1] = sclk / 1000000;
544 
545 	ci->ci_name = kmem_strdupsize(prom_getpropstring(node, "name"), NULL,
546 				      KM_NOSLEEP);
547 	snprintf(buf, sizeof buf, "%s @ %s MHz", ci->ci_name, clockfreq(clk));
548 	cpu_setmodel("%s (%s)", machine_model, buf);
549 
550 	aprint_normal(": %s, CPU id %d\n", buf, ci->ci_cpuid);
551 	aprint_naive("\n");
552 	if (CPU_ISSUN4U || CPU_ISSUN4US) {
553 		ci->ci_ver = getver();
554 		aprint_normal_dev(dev, "manuf %x, impl %x, mask %x\n",
555 		    (u_int)GETVER_CPU_MANUF(),
556 		    (u_int)GETVER_CPU_IMPL(),
557 		    (u_int)GETVER_CPU_MASK());
558 	}
559 
560 	if (ci->ci_system_clockrate[0] != 0) {
561 		aprint_normal_dev(dev, "system tick frequency %s MHz\n",
562 		    clockfreq(ci->ci_system_clockrate[0]));
563 	}
564 	aprint_normal_dev(dev, "");
565 
566 	bigcache = 0;
567 
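	/*
	 * Probe the instruction cache geometry.  The line size must be a
	 * power of two; if the PROM reports no total size, compute it as
	 * line size * number of lines * associativity.
	 */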
568 	icachesize = cpu_icache_size(node);
569 	if (icachesize > icache_size)
570 		icache_size = icachesize;
571 	linesize = l = cpu_icache_line_size(node);
572 	if (linesize > icache_line_size)
573 		icache_line_size = linesize;
574 
575 	for (i = 0; (1 << i) < l && l; i++)
576 		/* void */;
577 	if ((1 << i) != l && l)
578 		panic("bad icache line size %d", l);
579 	totalsize = icachesize;
580 	if (totalsize == 0)
581 		totalsize = l *
582 		    cpu_icache_nlines(node) * cpu_icache_associativity(node);
583 
584 	cachesize = totalsize / cpu_icache_associativity(node);
585 	bigcache = cachesize;
586 
587 	sep = "";
588 	if (totalsize > 0) {
589 		aprint_normal("%s%ldK instruction (%ld b/l)", sep,
590 		       (long)totalsize/1024,
591 		       (long)linesize);
592 		sep = ", ";
593 	}
594 	ci->ci_cacheinfo.c_itotalsize = totalsize;
595 	ci->ci_cacheinfo.c_ilinesize = linesize;
596 
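	/* Same again for the data cache. */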
597 	dcachesize = cpu_dcache_size(node);
598 	if (dcachesize > dcache_size)
599 		dcache_size = dcachesize;
600 	linesize = l = cpu_dcache_line_size(node);
601 	if (linesize > dcache_line_size)
602 		dcache_line_size = linesize;
603 
604 	for (i = 0; (1 << i) < l && l; i++)
605 		/* void */;
606 	if ((1 << i) != l && l)
607 		panic("bad dcache line size %d", l);
608 	totalsize = dcachesize;
609 	if (totalsize == 0)
610 		totalsize = l *
611 		    cpu_dcache_nlines(node) * cpu_dcache_associativity(node);
612 
613 	cachesize = totalsize / cpu_dcache_associativity(node);
614 	if (cachesize > bigcache)
615 		bigcache = cachesize;
616 
617 	if (totalsize > 0) {
618 		aprint_normal("%s%ldK data (%ld b/l)", sep,
619 		       (long)totalsize/1024,
620 		       (long)linesize);
621 		sep = ", ";
622 	}
623 	ci->ci_cacheinfo.c_dtotalsize = totalsize;
624 	ci->ci_cacheinfo.c_dlinesize = linesize;
625 
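	/*
	 * And for the external (E$) cache; also track the smallest E$
	 * line size seen across all CPUs.
	 */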
626 	linesize = l = cpu_ecache_line_size(node);
627 	for (i = 0; (1 << i) < l && l; i++)
628 		/* void */;
629 	if ((1 << i) != l && l)
630 		panic("bad ecache line size %d", l);
631 	totalsize = cpu_ecache_size(node);
632 	if (totalsize == 0)
633 		totalsize = l *
634 		    cpu_ecache_nlines(node) * cpu_ecache_associativity(node);
635 
636 	cachesize = totalsize / cpu_ecache_associativity(node);
637 	if (cachesize > bigcache)
638 		bigcache = cachesize;
639 
640 	if (totalsize > 0) {
641 		aprint_normal("%s%ldK external (%ld b/l)", sep,
642 		       (long)totalsize/1024,
643 		       (long)linesize);
644 	}
645 	aprint_normal("\n");
646 	ci->ci_cacheinfo.c_etotalsize = totalsize;
647 	ci->ci_cacheinfo.c_elinesize = linesize;
648 
649 	if (ecache_min_line_size == 0 ||
650 	    linesize < ecache_min_line_size)
651 		ecache_min_line_size = linesize;
652 
653 	cpu_setup_sysctl(ci, dev);
654 
655 	/*
656 	 * Now that we know the size of the largest cache on this CPU,
657 	 * re-color our pages.
658 	 */
659 	uvm_page_recolor(atop(bigcache)); /* XXX */
660 
661 	/*
662 	 * CPU-specific IPI setup.
663 	 * Currently only necessary for sun4v.
664 	 */
665 	if (CPU_ISSUN4V) {
666 		paddr_t pa = ci->ci_paddr;
667 		int err;
668 
669 		pa += CPUINFO_VA - INTSTACK;
670 		pa += PAGE_SIZE;
671 
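		/*
		 * Carve the cpu and device mondo queues, the outgoing mondo
		 * data and the target cpuset out of the memory following the
		 * cpu_info page in the per-CPU block.
		 */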
672 		ci->ci_cpumq = pa;
673 		err = hv_cpu_qconf(CPU_MONDO_QUEUE, ci->ci_cpumq, SUN4V_MONDO_QUEUE_SIZE);
674 		if (err != H_EOK)
675 			panic("Unable to set cpu mondo queue: %d", err);
676 		pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE;
677 
678 		ci->ci_devmq = pa;
679 		err = hv_cpu_qconf(DEVICE_MONDO_QUEUE, ci->ci_devmq, SUN4V_MONDO_QUEUE_SIZE);
680 		if (err != H_EOK)
681 			panic("Unable to set device mondo queue: %d", err);
682 		pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE;
683 
684 		ci->ci_mondo = pa;
685 		pa += 64; /* mondo message is 64 bytes */
686 
687 		ci->ci_cpuset = pa;
688 		pa += 64;
689 	}
690 
691 }
692 
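/*
 * Return the ID of the CPU we are currently running on: ask the hypervisor
 * on sun4v, read it from a fixed physical address on Starfire
 * (Ultra-Enterprise-10000), and otherwise derive it from the UPA, Fireplane
 * or Jupiter-bus ID as appropriate for the CPU implementation.
 */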
693 int
694 cpu_myid(void)
695 {
696 	char buf[32];
697 
698 	if (CPU_ISSUN4V) {
699 		uint64_t myid;
700 		hv_cpu_myid(&myid);
701 		return myid;
702 	}
703 	if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 &&
704 	    strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0)
705 		return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED);
706 	switch (GETVER_CPU_IMPL()) {
707 		case IMPL_OLYMPUS_C:
708 		case IMPL_JUPITER:
709 			return CPU_JUPITERID;
710 		case IMPL_CHEETAH:
711 		case IMPL_CHEETAH_PLUS:
712 		case IMPL_JAGUAR:
713 		case IMPL_PANTHER:
714 			return CPU_FIREPLANEID;
715 		default:
716 			return CPU_UPAID;
717 	}
718 }
719 
720 #if defined(MULTIPROCESSOR)
721 vaddr_t cpu_spinup_trampoline;
722 
723 /*
724  * Start secondary processors in motion.
725  */
726 void
727 cpu_boot_secondary_processors(void)
728 {
729 	int i, pstate;
730 	struct cpu_info *ci;
731 
732 	sync_tick = 0;
733 
734 	sparc64_ipi_init();
735 
736 	if (boothowto & RB_MD1) {
737 		cpus[0].ci_next = NULL;
738 		sparc_ncpus = ncpu = ncpuonline = 1;
739 		return;
740 	}
741 
742 	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
743 		if (ci->ci_cpuid == cpu_myid())
744 			continue;
745 
746 		cpu_pmap_prepare(ci, false);
747 		cpu_args->cb_node = ci->ci_node;
748 		cpu_args->cb_cpuinfo = ci->ci_paddr;
749 		cpu_args->cb_cputyp = cputyp;
750 		membar_Sync();
751 
752 		/* Disable interrupts and start another CPU. */
753 		pstate = getpstate();
754 		setpstate(PSTATE_KERN);
755 
756 		int rc = prom_startcpu_by_cpuid(ci->ci_cpuid,
757 		    (void *)cpu_spinup_trampoline, 0);
758 		if (rc == -1)
759 			prom_startcpu(ci->ci_node,
760 			    (void *)cpu_spinup_trampoline, 0);
761 
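		/*
		 * Wait up to 20 seconds (2000 * 10ms) for the new CPU to
		 * show up in cpus_active.
		 */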
762 		for (i = 0; i < 2000; i++) {
763 			membar_Sync();
764 			if (CPUSET_HAS(cpus_active, ci->ci_index))
765 				break;
766 			delay(10000);
767 		}
768 
769 		/* synchronize %tick (to some degree at least) */
770 		delay(1000);
771 		sync_tick = 1;
772 		membar_Sync();
773 		if (CPU_ISSUN4U || CPU_ISSUN4US)
774 			settick(0);
775 		if (ci->ci_system_clockrate[0] != 0)
776 			if (CPU_ISSUN4U || CPU_ISSUN4US)
777 				setstick(0);
778 
779 		setpstate(pstate);
780 
781 		if (!CPUSET_HAS(cpus_active, ci->ci_index))
782 			printf("cpu%d: startup failed\n", ci->ci_cpuid);
783 	}
784 }
785 
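/*
 * Per-CPU setup for a freshly started secondary CPU: initialize its pmap
 * state, mark it active, reset the FPU state, then spin until the boot CPU
 * raises sync_tick before zeroing %tick/%stick and establishing the clock
 * interrupt handler.
 */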
786 void
787 cpu_hatch(void)
788 {
789 	char *v = (char*)CPUINFO_VA;
790 	int i;
791 
792 	/* XXX - why flush the icache here? but should be harmless */
793 	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
794 		sparc_flush_icache(v + i);
795 
796 	cpu_pmap_init(curcpu());
797 	CPUSET_ADD(cpus_active, cpu_number());
798 	cpu_reset_fpustate();
799 	curlwp = curcpu()->ci_data.cpu_idlelwp;
800 	membar_Sync();
801 
802 	/* wait for the boot CPU to flip the switch */
803 	while (sync_tick == 0) {
804 		/* we do nothing here */
805 	}
806 	if (CPU_ISSUN4U || CPU_ISSUN4US)
807 		settick(0);
808 	if (curcpu()->ci_system_clockrate[0] != 0) {
809 		if (CPU_ISSUN4U || CPU_ISSUN4US)
810 			setstick(0);
811 		stickintr_establish(PIL_CLOCK, stickintr);
812 	} else {
813 		tickintr_establish(PIL_CLOCK, tickintr);
814 	}
815 	spl0();
816 }
817 #endif /* MULTIPROCESSOR */
818