/*	$NetBSD: cpu.c,v 1.129 2016/12/28 19:16:25 martin Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.129 2016/12/28 19:16:25 martin Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/pmap.h>
#include <machine/sparc64.h>
#include <machine/openfirm.h>
#include <machine/hypervisor.h>
#include <machine/mdesc.h>

#include <sparc64/sparc64/cache.h>

#define SUN4V_MONDO_QUEUE_SIZE	32
#define SUN4V_QUEUE_ENTRY_SIZE	64

int ecache_min_line_size;

/* Linked list of all CPUs in system. */
#if defined(MULTIPROCESSOR)
int sparc_ncpus = 0;
#endif
struct cpu_info *cpus = NULL;

volatile sparc64_cpuset_t cpus_active;	/* set of active cpus */
struct cpu_bootargs *cpu_args;	/* allocated very early in pmap_bootstrap. */
struct pool_cache *fpstate_cache;

static struct cpu_info *alloc_cpuinfo(u_int);

/* The following are used externally (sysctl_hw). */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

/* These are used in locore.s, and are maximums */
int	dcache_line_size;
int	dcache_size;
int	icache_line_size;
int	icache_size;

#ifdef MULTIPROCESSOR
static const char *ipi_evcnt_names[IPI_EVCNT_NUM] = IPI_EVCNT_NAMES;
#endif

static void cpu_reset_fpustate(void);

volatile int sync_tick = 0;

/* The CPU configuration driver. */
void cpu_attach(device_t, device_t, void *);
int cpu_match(device_t, cfdata_t, void *);

CFATTACH_DECL_NEW(cpu, 0, cpu_match, cpu_attach, NULL, NULL);

static int
cpuid_from_node(u_int cpu_node)
{
	/*
	 * Determine the cpuid by examining the node's properties
	 * in the following order:
	 *  upa-portid
	 *  portid
	 *  cpuid
	 *  reg (sun4v only)
	 */

	int id;

	id = prom_getpropint(cpu_node, "upa-portid", -1);
	if (id == -1)
		id = prom_getpropint(cpu_node, "portid", -1);
	if (id == -1)
		id = prom_getpropint(cpu_node, "cpuid", -1);
	if (CPU_ISSUN4V) {
		int reg[4];
		int *regp = reg;
		int len = 4;
		int rc = prom_getprop(cpu_node, "reg", sizeof(int),
		    &len, &regp);
		if (rc != 0)
			panic("No reg property found\n");
		/* the cpuid is in the low 28 bits of the first reg cell (sun4v) */
		id = reg[0] & 0x0fffffff;
	}
	if (id == -1)
		panic("failed to determine cpuid");

	return id;
}

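/*
 * Look up a cache property in the sun4v machine description.  Walk the
 * MD "cache" nodes until one matches the requested type ("instn", "data",
 * or NULL to match any type) and level, then return the value of the
 * named property (e.g. "size", "line-size", "associativity").  Returns 0
 * if no matching node is found.
 */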
static int
cpu_cache_info_sun4v(const char *type, int level, const char *prop)
{
	int idx = 0;
	uint64_t val = 0;

	idx = mdesc_find_node_by_idx(idx, "cache");
	while (idx != -1 && val == 0) {
		const char *name = mdesc_name_by_idx(idx);
		if (strcmp("cache", name) == 0) {
			const char *p;
			size_t len = 0;
			p = mdesc_get_prop_data(idx, "type", &len);
			if (p == NULL)
				panic("No type found\n");
			if (len == 0)
				panic("Len is zero");
			if (type == NULL || strcmp(p, type) == 0) {
				uint64_t l;
				l = mdesc_get_prop_val(idx, "level");
				if (l == level)
					val = mdesc_get_prop_val(idx, prop);
			}
		}
		if (val == 0)
			idx = mdesc_next_node(idx);
	}
	return val;
}

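/*
 * Cache geometry helpers.  On sun4v the information comes from the
 * machine description; on sun4u/sun4us it is read from the OpenPROM
 * cache properties of the cpu node, with conservative defaults when
 * a property is missing.
 */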
static int
cpu_icache_size(int node)
{
	if (CPU_ISSUN4V)
		return cpu_cache_info_sun4v("instn", 1, "size");
	else
		return prom_getpropint(node, "icache-size", 0);
}

static int
cpu_icache_line_size(int node)
{
	if (CPU_ISSUN4V)
		return cpu_cache_info_sun4v("instn", 1, "line-size");
	else
		return prom_getpropint(node, "icache-line-size", 0);
}

static int
cpu_icache_nlines(int node)
{
	if (CPU_ISSUN4V)
		return 0;
	else
		return prom_getpropint(node, "icache-nlines", 64);
}

static int
cpu_icache_associativity(int node)
{
	if (CPU_ISSUN4V) {
		int val;
		val = cpu_cache_info_sun4v("instn", 1, "associativity");
		if (val == 0)
			val = 1;
		return val;
	} else
		return prom_getpropint(node, "icache-associativity", 1);
}

static int
cpu_dcache_size(int node)
{
	if (CPU_ISSUN4V)
		return cpu_cache_info_sun4v("data", 1, "size");
	else
		return prom_getpropint(node, "dcache-size", 0);
}

static int
cpu_dcache_line_size(int node)
{
	if (CPU_ISSUN4V)
		return cpu_cache_info_sun4v("data", 1, "line-size");
	else
		return prom_getpropint(node, "dcache-line-size", 0);
}

static int
cpu_dcache_nlines(int node)
{
	if (CPU_ISSUN4V)
		return 0;
	else
		return prom_getpropint(node, "dcache-nlines", 128);
}

static int
cpu_dcache_associativity(int node)
{
	if (CPU_ISSUN4V) {
		int val;
		val = cpu_cache_info_sun4v("data", 1, "associativity");
		if (val == 0)
			val = 1;
		return val;
	} else
		return prom_getpropint(node, "dcache-associativity", 1);
}

int
cpu_ecache_size(int node)
{
	if (CPU_ISSUN4V)
		return cpu_cache_info_sun4v(NULL, 2, "size");
	else
		return prom_getpropint(node, "ecache-size", 0);
}

static int
cpu_ecache_line_size(int node)
{
	if (CPU_ISSUN4V)
		return cpu_cache_info_sun4v(NULL, 2, "line-size");
	else
		return prom_getpropint(node, "ecache-line-size", 0);
}

static int
cpu_ecache_nlines(int node)
{
	if (CPU_ISSUN4V)
		return 0;
	else
		return prom_getpropint(node, "ecache-nlines", 32768);
}

int
cpu_ecache_associativity(int node)
{
	if (CPU_ISSUN4V) {
		int val;
		val = cpu_cache_info_sun4v(NULL, 2, "associativity");
		if (val == 0)
			val = 1;
		return val;
	} else
		return prom_getpropint(node, "ecache-associativity", 1);
}

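/*
 * Allocate (or find) the cpu_info for a cpu node.  If a cpu_info with a
 * matching cpuid already exists it is returned; otherwise eight pages of
 * physical memory are taken from cpu0paddr, mapped at an aligned kernel
 * VA, and laid out the same way pmap_bootstrap() arranges the boot CPU's
 * pcb, idle stack and interrupt stack.
 */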
struct cpu_info *
alloc_cpuinfo(u_int cpu_node)
{
	paddr_t pa0, pa;
	vaddr_t va, va0;
	vsize_t sz = 8 * PAGE_SIZE;
	int cpuid;
	struct cpu_info *cpi, *ci;
	extern paddr_t cpu0paddr;

	/*
	 * Check for a matching cpuid in the cpus list.
	 */
	cpuid = cpuid_from_node(cpu_node);

	for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
		if (cpi->ci_cpuid == cpuid)
			return cpi;

	/* Allocate aligned VA and map it to physical pages from cpu0paddr. */
	va = uvm_km_alloc(kernel_map, sz, 8 * PAGE_SIZE, UVM_KMF_VAONLY);
	if (!va)
		panic("alloc_cpuinfo: no virtual space");
	va0 = va;

	pa0 = cpu0paddr;
	cpu0paddr += sz;

	for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

	pmap_update(pmap_kernel());

	cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);

	memset((void *)va0, 0, sz);

	/*
	 * Initialize cpuinfo structure.
	 *
	 * Arrange pcb, idle stack and interrupt stack in the same
	 * way as is done for the boot CPU in pmap.c.
	 */
	cpi->ci_next = NULL;
	cpi->ci_curlwp = NULL;
	cpi->ci_cpuid = cpuid;
	cpi->ci_fplwp = NULL;
	cpi->ci_eintstack = NULL;
	cpi->ci_spinup = NULL;
	cpi->ci_paddr = pa0;
	cpi->ci_self = cpi;
	if (CPU_ISSUN4V)
		cpi->ci_mmfsa = pa0;
	cpi->ci_node = cpu_node;
	cpi->ci_idepth = -1;
	memset(cpi->ci_intrpending, -1, sizeof(cpi->ci_intrpending));

	/*
	 * Finally, add the new cpu_info to the end of the cpus list.
	 */
	for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
		;
#ifdef MULTIPROCESSOR
	ci->ci_next = cpi;
#endif
	return (cpi);
}

int
cpu_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	if (strcmp(cf->cf_name, ma->ma_name) != 0)
		return 0;

	/*
	 * If we are only going to attach a single cpu, make sure
	 * to pick the one we are running on right now.
	 */
	if (cpuid_from_node(ma->ma_node) != cpu_myid()) {
#ifdef MULTIPROCESSOR
		if (boothowto & RB_MD1)
#endif
			return 0;
	}

	return 1;
}

static void
cpu_reset_fpustate(void)
{
	struct fpstate64 *fpstate;
	struct fpstate64 fps[2];

	/* This needs to be 64-byte aligned */
	fpstate = ALIGNFPSTATE(&fps[1]);

	/*
	 * Get the FSR and clear any exceptions.  If we do not unload
	 * the queue here and it is left over from a previous crash, we
	 * will panic in the first loadfpstate(), due to a sequence error,
	 * so we need to dump the whole state anyway.
	 */
	fpstate->fs_fsr = 7 << FSR_VER_SHIFT;	/* 7 is reserved for "none" */
	savefpstate(fpstate);
}

/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
void
cpu_attach(device_t parent, device_t dev, void *aux)
{
	int node;
	long clk, sclk = 0;
	struct mainbus_attach_args *ma = aux;
	struct cpu_info *ci;
	const char *sep;
	register int i, l;
	int bigcache, cachesize;
	char buf[100];
	int	totalsize = 0;
	int	linesize, dcachesize, icachesize;

	/* tell them what we have */
	node = ma->ma_node;

	/*
	 * Allocate cpu_info structure if needed.
	 */
	ci = alloc_cpuinfo((u_int)node);

	/*
	 * Only do this on the boot cpu.  Other cpus call
	 * cpu_reset_fpustate() from cpu_hatch() before they
	 * call into the idle loop.
	 * For other cpus, we need to call mi_cpu_attach()
	 * and complete setting up cpcb.
	 */
	if (ci->ci_flags & CPUF_PRIMARY) {
		fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
					SPARC64_BLOCK_SIZE, 0, 0, "fpstate",
					NULL, IPL_NONE, NULL, NULL, NULL);
		cpu_reset_fpustate();
	}
#ifdef MULTIPROCESSOR
	else {
		mi_cpu_attach(ci);
		ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	}
	for (i = 0; i < IPI_EVCNT_NUM; ++i)
		evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
				     NULL, device_xname(dev), ipi_evcnt_names[i]);
#endif
	evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
			     device_xname(dev), "timer");
	mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);

	clk = prom_getpropint(node, "clock-frequency", 0);
	if (clk == 0) {
		/*
		 * Try to find it in the OpenPROM root...
		 */
		clk = prom_getpropint(findroot(), "clock-frequency", 0);
	}
	if (clk) {
		/* Tell OS what frequency we run on */
		ci->ci_cpu_clockrate[0] = clk;
		ci->ci_cpu_clockrate[1] = clk / 1000000;
	}

	sclk = prom_getpropint(findroot(), "stick-frequency", 0);

	ci->ci_system_clockrate[0] = sclk;
	ci->ci_system_clockrate[1] = sclk / 1000000;

	snprintf(buf, sizeof buf, "%s @ %s MHz",
		prom_getpropstring(node, "name"), clockfreq(clk));
	cpu_setmodel("%s (%s)", machine_model, buf);

	aprint_normal(": %s, CPU id %d\n", buf, ci->ci_cpuid);
	aprint_naive("\n");
	if (CPU_ISSUN4U || CPU_ISSUN4US) {
		aprint_normal_dev(dev, "manuf %x, impl %x, mask %x\n",
		    (u_int)GETVER_CPU_MANUF(),
		    (u_int)GETVER_CPU_IMPL(),
		    (u_int)GETVER_CPU_MASK());
	}

	if (ci->ci_system_clockrate[0] != 0) {
		aprint_normal_dev(dev, "system tick frequency %s MHz\n",
		    clockfreq(ci->ci_system_clockrate[0]));
	}
	aprint_normal_dev(dev, "");

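	/*
	 * Probe the I$, D$ and E$ geometry.  For each cache, verify that
	 * the line size is a power of two, compute the total size (from
	 * line size * nlines * associativity if no size property exists),
	 * and remember the largest per-way size in "bigcache" so the page
	 * pool can be re-colored for it below.
	 */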
	bigcache = 0;

	icachesize = cpu_icache_size(node);
	if (icachesize > icache_size)
		icache_size = icachesize;
	linesize = l = cpu_icache_line_size(node);
	if (linesize > icache_line_size)
		icache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad icache line size %d", l);
	totalsize = icachesize;
	if (totalsize == 0)
		totalsize = l *
		    cpu_icache_nlines(node) * cpu_icache_associativity(node);

	cachesize = totalsize / cpu_icache_associativity(node);
	bigcache = cachesize;

	sep = "";
	if (totalsize > 0) {
		aprint_normal("%s%ldK instruction (%ld b/l)", sep,
		       (long)totalsize/1024,
		       (long)linesize);
		sep = ", ";
	}

	dcachesize = cpu_dcache_size(node);
	if (dcachesize > dcache_size)
		dcache_size = dcachesize;
	linesize = l = cpu_dcache_line_size(node);
	if (linesize > dcache_line_size)
		dcache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad dcache line size %d", l);
	totalsize = dcachesize;
	if (totalsize == 0)
		totalsize = l *
		    cpu_dcache_nlines(node) * cpu_dcache_associativity(node);

	cachesize = totalsize / cpu_dcache_associativity(node);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK data (%ld b/l)", sep,
		       (long)totalsize/1024,
		       (long)linesize);
		sep = ", ";
	}

	linesize = l = cpu_ecache_line_size(node);
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad ecache line size %d", l);
	totalsize = cpu_ecache_size(node);
	if (totalsize == 0)
		totalsize = l *
		    cpu_ecache_nlines(node) * cpu_ecache_associativity(node);

	cachesize = totalsize / cpu_ecache_associativity(node);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK external (%ld b/l)", sep,
		       (long)totalsize/1024,
		       (long)linesize);
	}
	aprint_normal("\n");

	if (ecache_min_line_size == 0 ||
	    linesize < ecache_min_line_size)
		ecache_min_line_size = linesize;

	/*
	 * Now that we know the size of the largest cache on this CPU,
	 * re-color our pages.
	 */
	uvm_page_recolor(atop(bigcache)); /* XXX */

	/*
	 * CPU specific ipi setup
	 * Currently only necessary for SUN4V
	 */
	if (CPU_ISSUN4V) {
		paddr_t pa = ci->ci_paddr;
		int err;

		pa += CPUINFO_VA - INTSTACK;
		pa += PAGE_SIZE;

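		/*
		 * Carve the sun4v per-cpu interrupt structures out of the
		 * pages allocated in alloc_cpuinfo(): the cpu and device
		 * mondo queues registered with the hypervisor, a 64-byte
		 * buffer for outgoing mondo messages, and space for a
		 * cpu set.
		 */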
		ci->ci_cpumq = pa;
		err = hv_cpu_qconf(CPU_MONDO_QUEUE, ci->ci_cpumq, SUN4V_MONDO_QUEUE_SIZE);
		if (err != H_EOK)
			panic("Unable to set cpu mondo queue: %d", err);
		pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE;

		ci->ci_devmq = pa;
		err = hv_cpu_qconf(DEVICE_MONDO_QUEUE, ci->ci_devmq, SUN4V_MONDO_QUEUE_SIZE);
		if (err != H_EOK)
			panic("Unable to set device mondo queue: %d", err);
		pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE;

		ci->ci_mondo = pa;
		pa += 64; /* mondo message is 64 bytes */

		ci->ci_cpuset = pa;
		pa += 64;
	}
}

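/*
 * Return the id of the cpu we are currently running on.  On sun4v ask
 * the hypervisor; on Starfire (Ultra-Enterprise-10000) read the id from
 * a fixed physical address; otherwise pick CPU_JUPITERID, CPU_FIREPLANEID
 * or CPU_UPAID according to the cpu implementation in the VER register.
 */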
int
cpu_myid(void)
{
	char buf[32];

	if (CPU_ISSUN4V) {
		uint64_t myid;
		hv_cpu_myid(&myid);
		return myid;
	}
	if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 &&
	    strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0)
		return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED);
	switch (GETVER_CPU_IMPL()) {
		case IMPL_OLYMPUS_C:
		case IMPL_JUPITER:
			return CPU_JUPITERID;
		case IMPL_CHEETAH:
		case IMPL_CHEETAH_PLUS:
		case IMPL_JAGUAR:
		case IMPL_PANTHER:
			return CPU_FIREPLANEID;
		default:
			return CPU_UPAID;
	}
}

#if defined(MULTIPROCESSOR)
vaddr_t cpu_spinup_trampoline;

/*
 * Start secondary processors in motion.
 */
void
cpu_boot_secondary_processors(void)
{
	int i, pstate;
	struct cpu_info *ci;

	sync_tick = 0;

	sparc64_ipi_init();

	if (boothowto & RB_MD1) {
		cpus[0].ci_next = NULL;
		sparc_ncpus = ncpu = ncpuonline = 1;
		return;
	}

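	/*
	 * For each secondary cpu: prepare its pmap context, pass its node,
	 * cpu_info physical address and cpu type through the shared
	 * cpu_args page, start it on the spinup trampoline via the PROM,
	 * wait for it to show up in cpus_active, then flip sync_tick so
	 * both cpus reset %tick/%stick at (roughly) the same time.
	 */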
	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (ci->ci_cpuid == cpu_myid())
			continue;

		cpu_pmap_prepare(ci, false);
		cpu_args->cb_node = ci->ci_node;
		cpu_args->cb_cpuinfo = ci->ci_paddr;
		cpu_args->cb_cputyp = cputyp;
		membar_Sync();

		/* Disable interrupts and start another CPU. */
		pstate = getpstate();
		setpstate(PSTATE_KERN);

		int rc = prom_startcpu_by_cpuid(ci->ci_cpuid,
		    (void *)cpu_spinup_trampoline, 0);
		if (rc == -1)
			prom_startcpu(ci->ci_node,
			    (void *)cpu_spinup_trampoline, 0);

		for (i = 0; i < 2000; i++) {
			membar_Sync();
			if (CPUSET_HAS(cpus_active, ci->ci_index))
				break;
			delay(10000);
		}

		/* synchronize %tick (to some degree at least) */
		delay(1000);
		sync_tick = 1;
		membar_Sync();
		if (CPU_ISSUN4U || CPU_ISSUN4US)
			settick(0);
		if (ci->ci_system_clockrate[0] != 0)
			if (CPU_ISSUN4U || CPU_ISSUN4US)
				setstick(0);

		setpstate(pstate);

		if (!CPUSET_HAS(cpus_active, ci->ci_index))
			printf("cpu%d: startup failed\n", ci->ci_cpuid);
	}
}

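/*
 * Secondary cpu entry point, reached from cpu_spinup_trampoline.  Finish
 * per-cpu pmap setup, mark this cpu active, wait for the boot cpu to
 * raise sync_tick, reset %tick/%stick, establish the clock interrupt and
 * drop to spl0.
 */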
void
cpu_hatch(void)
{
	char *v = (char *)CPUINFO_VA;
	int i;

	/* XXX - why flush the icache here? but should be harmless */
	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		sparc_flush_icache(v + i);

	cpu_pmap_init(curcpu());
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_Sync();

	/* wait for the boot CPU to flip the switch */
	while (sync_tick == 0) {
		/* we do nothing here */
	}
	if (CPU_ISSUN4U || CPU_ISSUN4US)
		settick(0);
	if (curcpu()->ci_system_clockrate[0] != 0) {
		if (CPU_ISSUN4U || CPU_ISSUN4US)
			setstick(0);
		stickintr_establish(PIL_CLOCK, stickintr);
	} else {
		tickintr_establish(PIL_CLOCK, tickintr);
	}
	spl0();
}
#endif /* MULTIPROCESSOR */
749