xref: /netbsd-src/sys/arch/alpha/alpha/cpu.c (revision e5696cfb37d81e6fd1a7fdebe40d6733e781f5a7)
1 /* $NetBSD: cpu.c,v 1.108 2024/03/06 07:22:45 thorpej Exp $ */
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2001, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
35  * All rights reserved.
36  *
37  * Author: Chris G. Demetriou
38  *
39  * Permission to use, copy, modify and distribute this software and
40  * its documentation is hereby granted, provided that both the copyright
41  * notice and this permission notice appear in all copies of the
42  * software, derivative works or modified versions, and any portions
43  * thereof, and that both notices appear in supporting documentation.
44  *
45  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
47  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48  *
49  * Carnegie Mellon requests users of this software to return to
50  *
51  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
52  *  School of Computer Science
53  *  Carnegie Mellon University
54  *  Pittsburgh PA 15213-3890
55  *
56  * any improvements or extensions that they make and grant Carnegie the
57  * rights to redistribute these changes.
58  */
59 
60 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
61 
62 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.108 2024/03/06 07:22:45 thorpej Exp $");
63 
64 #include "opt_ddb.h"
65 #include "opt_multiprocessor.h"
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/device.h>
70 #include <sys/kmem.h>
71 #include <sys/proc.h>
72 #include <sys/atomic.h>
73 #include <sys/cpu.h>
74 #include <sys/sysctl.h>
75 
76 #include <uvm/uvm_extern.h>
77 
78 #include <machine/autoconf.h>
79 #include <machine/cpuvar.h>
80 #include <machine/rpb.h>
81 #include <machine/prom.h>
82 #include <machine/alpha.h>
83 
84 struct cpu_info cpu_info_primary __cacheline_aligned = {
85 	.ci_curlwp = &lwp0,
86 	.ci_flags  = CPUF_PRIMARY|CPUF_RUNNING,
87 };
88 struct cpu_info *cpu_info_list __read_mostly = &cpu_info_primary;
89 
90 #if defined(MULTIPROCESSOR)
91 /*
92  * Array of CPU info structures.  Must be statically-allocated because
93  * curproc, etc. are used early.
94  */
95 struct cpu_info *cpu_info[ALPHA_MAXPROCS];
96 
97 /* Bitmask of CPUs booted, currently running, and paused. */
98 volatile u_long cpus_booted __read_mostly;
99 volatile u_long cpus_running __read_mostly;
100 volatile u_long cpus_paused __read_mostly;
101 
102 void	cpu_boot_secondary(struct cpu_info *);
103 #endif /* MULTIPROCESSOR */
104 
105 static void
106 cpu_idle_default(void)
107 {
108 	/*
109 	 * Default is to do nothing.  Platform code can overwrite
110 	 * as needed.
111 	 */
112 }
113 
114 void
115 cpu_idle_wtint(void)
116 {
117 	/*
118 	 * Some PALcode versions implement the WTINT call to idle
119 	 * in a low power mode.
120 	 */
121 	alpha_pal_wtint(0);
122 }
123 
124 void	(*cpu_idle_fn)(void) __read_mostly = cpu_idle_default;
125 
126 /*
127  * The Implementation Version and the Architecture Mask must be
128  * consistent across all CPUs in the system, so we set it for the
129  * primary and announce the AMASK extensions if they exist.
130  *
131  * Note, we invert the AMASK so that if a bit is set, it means "has
132  * extension".
133  */
134 u_long	cpu_implver __read_mostly;
135 u_long	cpu_amask __read_mostly;
136 
137 /* Definition of the driver for autoconfig. */
138 static int	cpumatch(device_t, cfdata_t, void *);
139 static void	cpuattach(device_t, device_t, void *);
140 
141 CFATTACH_DECL_NEW(cpu, sizeof(struct cpu_softc),
142     cpumatch, cpuattach, NULL, NULL);
143 
144 static void	cpu_announce_extensions(struct cpu_info *);
145 
146 extern struct cfdriver cpu_cd;
147 
148 static const char * const lcaminor[] = {
149 	"",
150 	"21066", "21066",
151 	"21068", "21068",
152 	"21066A", "21068A",
153 	NULL
154 };
155 
156 const struct cputable_struct {
157 	const char *cpu_evname;
158 	const char *cpu_major_name;
159 	const char * const *cpu_minor_names;
160 } cpunametable[] = {
161 [PCS_PROC_EV3]       ={	"EV3",		NULL,		NULL		},
162 [PCS_PROC_EV4]       ={	"EV4",		"21064",	NULL		},
163 [PCS_PROC_SIMULATION]={ "Sim",		NULL,		NULL		},
164 [PCS_PROC_LCA4]      ={	"LCA4",		NULL,		lcaminor	},
165 [PCS_PROC_EV5]       ={	"EV5",		"21164",	NULL		},
166 [PCS_PROC_EV45]      ={	"EV45",		"21064A",	NULL		},
167 [PCS_PROC_EV56]      ={	"EV56",		"21164A",	NULL		},
168 [PCS_PROC_EV6]       ={	"EV6",		"21264",	NULL		},
169 [PCS_PROC_PCA56]     ={	"PCA56",	"21164PC",	NULL		},
170 [PCS_PROC_PCA57]     ={	"PCA57",	"21164PC"/*XXX*/,NULL		},
171 [PCS_PROC_EV67]      ={	"EV67",		"21264A",	NULL		},
172 [PCS_PROC_EV68CB]    ={	"EV68CB",	"21264C",	NULL		},
173 [PCS_PROC_EV68AL]    ={	"EV68AL",	"21264B",	NULL		},
174 [PCS_PROC_EV68CX]    ={	"EV68CX",	"21264D",	NULL		},
175 [PCS_PROC_EV7]       ={	"EV7",		"21364",	NULL		},
176 [PCS_PROC_EV79]      ={	"EV79",		NULL,		NULL		},
177 [PCS_PROC_EV69]      ={	"EV69",		NULL,		NULL		},
178 };
179 
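/*
 * cpu_description:
 *
 *	Compose a human-readable model string for the CPU from the
 *	major/minor type codes recorded in its softc.  Returns true
 *	if the CPU type was recognized, false if a placeholder
 *	"UNKNOWN CPU TYPE" string was generated.
 */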
180 static bool
181 cpu_description(const struct cpu_softc * const sc,
182     char * const buf, size_t const buflen)
183 {
184 	const char * const *s;
185 	const char *ev;
186 	int i;
187 
188 	const uint32_t major = sc->sc_major_type;
189 	const uint32_t minor = sc->sc_minor_type;
190 
191 	if (major < __arraycount(cpunametable) &&
192 	    (ev = cpunametable[major].cpu_evname) != NULL) {
193 		s = cpunametable[major].cpu_minor_names;
194 		for (i = 0; s != NULL && s[i] != NULL; i++) {
195 			if (i == minor && strlen(s[i]) != 0) {
196 				break;
197 			}
198 		}
199 		if (s == NULL || s[i] == NULL) {
200 			s = &cpunametable[major].cpu_major_name;
201 			i = 0;
202 			if (s[i] == NULL) {
203 				s = NULL;
204 			}
205 		}
206 
207 		/*
208 		 * Example strings:
209 		 *
210 		 *	Sim-0
211 		 *	21068-3 (LCA4)		[uses minor table]
212 		 *	21264C-5 (EV68CB)
213 		 *	21164PC-1 (PCA56)
214 		 */
215 		if (s != NULL) {
216 			snprintf(buf, buflen, "%s-%d (%s)", s[i], minor, ev);
217 		} else {
218 			snprintf(buf, buflen, "%s-%d", ev, minor);
219 		}
220 		return true;
221 	}
222 
223 	snprintf(buf, buflen, "UNKNOWN CPU TYPE (%u:%u)", major, minor);
224 	return false;
225 }
226 
227 static int
228 cpu_sysctl_model(SYSCTLFN_ARGS)
229 {
230 	struct sysctlnode node = *rnode;
231 	const struct cpu_softc * const sc = node.sysctl_data;
232 	char model[32];
233 
234 	cpu_description(sc, model, sizeof(model));
235 	node.sysctl_data = model;
236 	return sysctl_lookup(SYSCTLFN_CALL(&node));
237 }
238 
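/*
 * cpu_sysctl_amask_bit:
 *
 *	Common sysctl helper for the per-extension nodes below;
 *	reports whether the given AMASK bit is set for this CPU.
 */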
239 static int
240 cpu_sysctl_amask_bit(SYSCTLFN_ARGS, unsigned long const bit)
241 {
242 	struct sysctlnode node = *rnode;
243 	const struct cpu_softc * const sc = node.sysctl_data;
244 
245 	bool result = (sc->sc_amask & bit) ? true : false;
246 	node.sysctl_data = &result;
247 	return sysctl_lookup(SYSCTLFN_CALL(&node));
248 }
249 
250 static int
251 cpu_sysctl_bwx(SYSCTLFN_ARGS)
252 {
253 	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_BWX);
254 }
255 
256 static int
257 cpu_sysctl_fix(SYSCTLFN_ARGS)
258 {
259 	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_FIX);
260 }
261 
262 static int
263 cpu_sysctl_cix(SYSCTLFN_ARGS)
264 {
265 	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_CIX);
266 }
267 
268 static int
269 cpu_sysctl_mvi(SYSCTLFN_ARGS)
270 {
271 	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_MVI);
272 }
273 
274 static int
275 cpu_sysctl_pat(SYSCTLFN_ARGS)
276 {
277 	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_PAT);
278 }
279 
280 static int
281 cpu_sysctl_pmi(SYSCTLFN_ARGS)
282 {
283 	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_PMI);
284 }
285 
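/*
 * cpu_sysctl_primary:
 *
 *	Sysctl helper; reports whether this CPU is the primary.
 */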
286 static int
287 cpu_sysctl_primary(SYSCTLFN_ARGS)
288 {
289 	struct sysctlnode node = *rnode;
290 	const struct cpu_softc * const sc = node.sysctl_data;
291 
292 	bool result = CPU_IS_PRIMARY(sc->sc_ci);
293 	node.sysctl_data = &result;
294 	return sysctl_lookup(SYSCTLFN_CALL(&node));
295 }
296 
297 /*
298  * The following is an attempt to map out how booting secondary CPUs
299  * works.
300  *
301  * As we find processors during the autoconfiguration sequence, all
302  * processors have idle stacks and PCBs created for them, including
303  * the primary (although the primary idles on lwp0's PCB until its
304  * idle PCB is created).
305  *
306  * Right before calling uvm_scheduler(), main() calls, on lwp0's
307  * context, cpu_boot_secondary_processors().  This is our key to
308  * actually spin up the additional processors we've found.  We
309  * run through our cpu_info[] array looking for secondary processors
310  * with idle PCBs, and spin them up.
311  *
312  * The spinup involves switching the secondary processor to the
313  * OSF/1 PALcode, setting the entry point to cpu_spinup_trampoline(),
314  * and sending a "START" message to the secondary's console.
315  *
316  * Upon successful processor bootup, the cpu_spinup_trampoline will call
317  * cpu_hatch(), which will print a message indicating that the processor
318  * is running, and will set the "hatched" flag in its softc.  At the end
319  * of cpu_hatch() is a spin-forever loop; we do not yet attempt to schedule
320  * anything on secondary CPUs.
321  */
322 
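/*
 * cpumatch:
 *
 *	Autoconfiguration match routine; accept anything mainbus
 *	claims is a CPU.
 */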
323 static int
324 cpumatch(device_t parent, cfdata_t cfdata, void *aux)
325 {
326 	struct mainbus_attach_args *ma = aux;
327 
328 	/* make sure that we're looking for a CPU. */
329 	if (strcmp(ma->ma_name, cpu_cd.cd_name) != 0)
330 		return (0);
331 
332 	/* XXX CHECK SLOT? */
333 	/* XXX CHECK PRIMARY? */
334 
335 	return (1);
336 }
337 
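/*
 * cpuattach:
 *
 *	Autoconfiguration attach routine: identify the processor,
 *	set up its cpu_info, boot it if it is a secondary, and
 *	attach event counters and sysctl nodes.
 */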
338 static void
339 cpuattach(device_t parent, device_t self, void *aux)
340 {
341 	struct cpu_softc * const sc = device_private(self);
342 	const struct mainbus_attach_args * const ma = aux;
343 	struct cpu_info *ci;
344 	char model[32];
345 
346 	const bool primary = ma->ma_slot == hwrpb->rpb_primary_cpu_id;
347 
348 	sc->sc_dev = self;
349 
350 	const struct pcs * const p = LOCATE_PCS(hwrpb, ma->ma_slot);
351 	sc->sc_major_type = PCS_CPU_MAJORTYPE(p);
352 	sc->sc_minor_type = PCS_CPU_MINORTYPE(p);
353 
354 	const bool recognized = cpu_description(sc, model, sizeof(model));
355 
356 	aprint_normal(": ID %d%s, ", ma->ma_slot, primary ? " (primary)" : "");
357 	if (recognized) {
358 		aprint_normal("%s", model);
359 	} else {
360 		aprint_error("%s", model);
361 	}
362 
363 	aprint_naive("\n");
364 	aprint_normal("\n");
365 
366 	if (p->pcs_proc_var != 0) {
367 		bool needcomma = false;
368 		const char *vaxfp = "";
369 		const char *ieeefp = "";
370 		const char *pe = "";
371 
372 		if (p->pcs_proc_var & PCS_VAR_VAXFP) {
373 			sc->sc_vax_fp = true;
374 			vaxfp = "VAX FP support";
375 			needcomma = true;
376 		}
377 		if (p->pcs_proc_var & PCS_VAR_IEEEFP) {
378 			sc->sc_ieee_fp = true;
379 			ieeefp = ", IEEE FP support";
380 			if (!needcomma)
381 				ieeefp += 2;
382 			needcomma = true;
383 		}
384 		if (p->pcs_proc_var & PCS_VAR_PE) {
385 			sc->sc_primary_eligible = true;
386 			pe = ", Primary Eligible";
387 			if (!needcomma)
388 				pe += 2;
389 			needcomma = true;
390 		}
391 		aprint_debug_dev(sc->sc_dev, "%s%s%s", vaxfp, ieeefp, pe);
392 		if (p->pcs_proc_var & PCS_VAR_RESERVED)
393 			aprint_debug("%sreserved bits: %#lx",
394 			    needcomma ? ", " : "",
395 			    p->pcs_proc_var & PCS_VAR_RESERVED);
396 		aprint_debug("\n");
397 	}
398 
399 	if (ma->ma_slot > ALPHA_WHAMI_MAXID) {
400 		if (primary)
401 			panic("cpu_attach: primary CPU ID too large");
402 		aprint_error_dev(sc->sc_dev,
403 		    "processor ID too large, ignoring\n");
404 		return;
405 	}
406 
407 	if (primary) {
408 		ci = &cpu_info_primary;
409 	} else {
410 		/*
411 		 * kmem_zalloc() will guarantee cache line alignment for
412 		 * all allocations >= CACHE_LINE_SIZE.
413 		 */
414 		ci = kmem_zalloc(sizeof(*ci), KM_SLEEP);
415 		KASSERT(((uintptr_t)ci & (CACHE_LINE_SIZE - 1)) == 0);
416 	}
417 #if defined(MULTIPROCESSOR)
418 	cpu_info[ma->ma_slot] = ci;
419 #endif
420 	ci->ci_cpuid = ma->ma_slot;
421 	ci->ci_softc = sc;
422 	ci->ci_pcc_freq = hwrpb->rpb_cc_freq;
423 
424 	sc->sc_ci = ci;
425 
426 #if defined(MULTIPROCESSOR)
427 	/*
428 	 * Make sure the processor is available for use.
429 	 */
430 	if ((p->pcs_flags & PCS_PA) == 0) {
431 		if (primary)
432 			panic("cpu_attach: primary not available?!");
433 		aprint_normal_dev(sc->sc_dev,
434 		    "processor not available for use\n");
435 		return;
436 	}
437 
438 	/* Make sure the processor has valid PALcode. */
439 	if ((p->pcs_flags & PCS_PV) == 0) {
440 		if (primary)
441 			panic("cpu_attach: primary has invalid PALcode?!");
442 		aprint_error_dev(sc->sc_dev, "PALcode not valid\n");
443 		return;
444 	}
445 #endif /* MULTIPROCESSOR */
446 
447 	/*
448 	 * If we're the primary CPU, no more work to do; we're already
449 	 * running!
450 	 */
451 	if (primary) {
452 		cpu_announce_extensions(ci);
453 #if defined(MULTIPROCESSOR)
454 		KASSERT(ci->ci_flags & CPUF_PRIMARY);
455 		KASSERT(ci->ci_flags & CPUF_RUNNING);
456 		atomic_or_ulong(&cpus_booted, (1UL << ma->ma_slot));
457 		atomic_or_ulong(&cpus_running, (1UL << ma->ma_slot));
458 #endif /* MULTIPROCESSOR */
459 	} else {
460 #if defined(MULTIPROCESSOR)
461 		int error;
462 
463 		error = mi_cpu_attach(ci);
464 		if (error != 0) {
465 			aprint_error_dev(sc->sc_dev,
466 			    "mi_cpu_attach failed with %d\n", error);
467 			return;
468 		}
469 
470 		/*
471 		 * Boot the secondary processor.  It will announce its
472 		 * extensions, and then spin until we tell it to go
473 		 * on its merry way.
474 		 */
475 		cpu_boot_secondary(ci);
476 
477 		/*
478 		 * Link the processor into the list.
479 		 */
480 		ci->ci_next = cpu_info_list->ci_next;
481 		cpu_info_list->ci_next = ci;
482 #else /* ! MULTIPROCESSOR */
483 		aprint_normal_dev(sc->sc_dev, "processor off-line; "
484 		    "multiprocessor support not present in kernel\n");
485 #endif /* MULTIPROCESSOR */
486 	}
487 
488 	evcnt_attach_dynamic(&sc->sc_evcnt_clock, EVCNT_TYPE_INTR,
489 	    NULL, device_xname(sc->sc_dev), "clock");
490 	evcnt_attach_dynamic(&sc->sc_evcnt_device, EVCNT_TYPE_INTR,
491 	    NULL, device_xname(sc->sc_dev), "device");
492 #if defined(MULTIPROCESSOR)
493 	alpha_ipi_init(ci);
494 #endif
495 
496 	struct sysctllog **log = &sc->sc_sysctllog;
497 	const struct sysctlnode *rnode, *cnode;
498 	int error;
499 
500 	error = sysctl_createv(log, 0, NULL, &rnode, CTLFLAG_PERMANENT,
501 	    CTLTYPE_NODE, device_xname(sc->sc_dev),
502 	    SYSCTL_DESCR("cpu properties"),
503 	    NULL, 0,
504 	    NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
505 	if (error)
506 		return;
507 
508 	error = sysctl_createv(log, 0, &rnode, &cnode,
509 	    CTLFLAG_PERMANENT, CTLTYPE_STRING, "model",
510 	    SYSCTL_DESCR("cpu model"),
511 	    cpu_sysctl_model, 0,
512 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
513 	if (error)
514 		return;
515 
516 	error = sysctl_createv(log, 0, &rnode, &cnode,
517 	    CTLFLAG_PERMANENT, CTLTYPE_INT, "major",
518 	    SYSCTL_DESCR("cpu major type"),
519 	    NULL, 0,
520 	    &sc->sc_major_type, 0, CTL_CREATE, CTL_EOL);
521 	if (error)
522 		return;
523 
524 	error = sysctl_createv(log, 0, &rnode, &cnode,
525 	    CTLFLAG_PERMANENT, CTLTYPE_INT, "minor",
526 	    SYSCTL_DESCR("cpu minor type"),
527 	    NULL, 0,
528 	    &sc->sc_minor_type, 0, CTL_CREATE, CTL_EOL);
529 	if (error)
530 		return;
531 
532 	error = sysctl_createv(log, 0, &rnode, &cnode,
533 	    CTLFLAG_PERMANENT, CTLTYPE_LONG, "implver",
534 	    SYSCTL_DESCR("cpu implementation version"),
535 	    NULL, 0,
536 	    &sc->sc_implver, 0, CTL_CREATE, CTL_EOL);
537 	if (error)
538 		return;
539 
540 	error = sysctl_createv(log, 0, &rnode, &cnode,
541 	    CTLFLAG_PERMANENT|CTLFLAG_HEX, CTLTYPE_LONG, "amask",
542 	    SYSCTL_DESCR("architecture extensions mask"),
543 	    NULL, 0,
544 	    &sc->sc_amask, 0, CTL_CREATE, CTL_EOL);
545 	if (error)
546 		return;
547 
548 	error = sysctl_createv(log, 0, &rnode, &cnode,
549 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "bwx",
550 	    SYSCTL_DESCR("cpu supports BWX extension"),
551 	    cpu_sysctl_bwx, 0,
552 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
553 	if (error)
554 		return;
555 
556 	error = sysctl_createv(log, 0, &rnode, &cnode,
557 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "fix",
558 	    SYSCTL_DESCR("cpu supports FIX extension"),
559 	    cpu_sysctl_fix, 0,
560 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
561 	if (error)
562 		return;
563 
564 	error = sysctl_createv(log, 0, &rnode, &cnode,
565 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "cix",
566 	    SYSCTL_DESCR("cpu supports CIX extension"),
567 	    cpu_sysctl_cix, 0,
568 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
569 	if (error)
570 		return;
571 
572 	error = sysctl_createv(log, 0, &rnode, &cnode,
573 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "mvi",
574 	    SYSCTL_DESCR("cpu supports MVI extension"),
575 	    cpu_sysctl_mvi, 0,
576 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
577 	if (error)
578 		return;
579 
580 	error = sysctl_createv(log, 0, &rnode, &cnode,
581 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "pat",
582 	    SYSCTL_DESCR("cpu supports PAT extension"),
583 	    cpu_sysctl_pat, 0,
584 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
585 	if (error)
586 		return;
587 
588 	error = sysctl_createv(log, 0, &rnode, &cnode,
589 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "pmi",
590 	    SYSCTL_DESCR("cpu supports PMI extension"),
591 	    cpu_sysctl_pmi, 0,
592 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
593 	if (error)
594 		return;
595 
596 	error = sysctl_createv(log, 0, &rnode, &cnode,
597 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "vax_fp",
598 	    SYSCTL_DESCR("cpu supports VAX FP"),
599 	    NULL, 0,
600 	    &sc->sc_vax_fp, 0, CTL_CREATE, CTL_EOL);
601 	if (error)
602 		return;
603 
604 	error = sysctl_createv(log, 0, &rnode, &cnode,
605 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "ieee_fp",
606 	    SYSCTL_DESCR("cpu supports IEEE FP"),
607 	    NULL, 0,
608 	    &sc->sc_ieee_fp, 0, CTL_CREATE, CTL_EOL);
609 	if (error)
610 		return;
611 
612 	error = sysctl_createv(log, 0, &rnode, &cnode,
613 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "primary_eligible",
614 	    SYSCTL_DESCR("cpu is primary-eligible"),
615 	    NULL, 0,
616 	    &sc->sc_primary_eligible, 0, CTL_CREATE, CTL_EOL);
617 	if (error)
618 		return;
619 
620 	error = sysctl_createv(log, 0, &rnode, &cnode,
621 	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "primary",
622 	    SYSCTL_DESCR("cpu is the primary cpu"),
623 	    cpu_sysctl_primary, 0,
624 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
625 	if (error)
626 		return;
627 
628 	error = sysctl_createv(log, 0, &rnode, &cnode,
629 	    CTLFLAG_PERMANENT, CTLTYPE_LONG, "cpu_id",
630 	    SYSCTL_DESCR("hardware cpu ID"),
631 	    NULL, 0,
632 	    &sc->sc_ci->ci_cpuid, 0, CTL_CREATE, CTL_EOL);
633 	if (error)
634 		return;
635 
636 	error = sysctl_createv(log, 0, &rnode, &cnode,
637 	    CTLFLAG_PERMANENT, CTLTYPE_LONG, "pcc_freq",
638 	    SYSCTL_DESCR("PCC frequency"),
639 	    NULL, 0,
640 	    &sc->sc_ci->ci_pcc_freq, 0, CTL_CREATE, CTL_EOL);
641 	if (error)
642 		return;
643 }
644 
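/*
 * cpu_announce_extensions:
 *
 *	Record this CPU's IMPLVER and inverted AMASK in its softc,
 *	update the system-wide cpu_implver/cpu_amask values, and
 *	print any architecture extensions found.
 */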
645 static void
646 cpu_announce_extensions(struct cpu_info *ci)
647 {
648 	u_long implver, amask = 0;
649 	char bits[64];
650 
651 	implver = alpha_implver();
652 	if (implver >= ALPHA_IMPLVER_EV5)
653 		amask = (~alpha_amask(ALPHA_AMASK_ALL)) & ALPHA_AMASK_ALL;
654 
655 	ci->ci_softc->sc_implver = implver;
656 	ci->ci_softc->sc_amask = amask;
657 
658 	if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id) {
659 		cpu_implver = implver;
660 		cpu_amask = amask;
661 	} else {
662 		if (implver < cpu_implver)
663 			aprint_error_dev(ci->ci_softc->sc_dev,
664 			    "WARNING: IMPLVER %lu < %lu\n",
665 			    implver, cpu_implver);
666 
667 		/*
668 		 * Cap the system architecture mask to the intersection
669 		 * of features supported by all processors in the system.
670 		 */
671 		cpu_amask &= amask;
672 	}
673 
674 	if (amask) {
675 		snprintb(bits, sizeof(bits),
676 		    ALPHA_AMASK_BITS, amask);
677 		aprint_normal_dev(ci->ci_softc->sc_dev,
678 		    "Architecture extensions: %s\n", bits);
679 	}
680 }
681 
682 #if defined(MULTIPROCESSOR)
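/*
 * cpu_boot_secondary_processors:
 *
 *	Called by main() just before the scheduler starts; patches
 *	MP-critical routines and releases all hatched secondary CPUs
 *	by marking them running.
 */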
683 void
684 cpu_boot_secondary_processors(void)
685 {
686 	struct cpu_info *ci;
687 	u_long i;
688 	bool did_patch = false;
689 
690 	for (i = 0; i < ALPHA_MAXPROCS; i++) {
691 		ci = cpu_info[i];
692 		if (ci == NULL || ci->ci_data.cpu_idlelwp == NULL)
693 			continue;
694 		if (CPU_IS_PRIMARY(ci))
695 			continue;
696 		if ((cpus_booted & (1UL << i)) == 0)
697 			continue;
698 
699 		/* Patch MP-critical kernel routines. */
700 		if (did_patch == false) {
701 			alpha_patch(true);
702 			did_patch = true;
703 		}
704 
705 		/*
706 		 * Launch the processor.
707 		 */
708 		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
709 		atomic_or_ulong(&cpus_running, (1UL << i));
710 	}
711 }
712 
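/*
 * cpu_boot_secondary:
 *
 *	Start a single secondary CPU: copy our HWPCB and the primary's
 *	PALcode revision into its PCS, point the HWRPB restart vector
 *	at cpu_spinup_trampoline(), send a START command to its
 *	console, and wait for it to boot and hatch.
 */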
713 void
714 cpu_boot_secondary(struct cpu_info *ci)
715 {
716 	long timeout;
717 	struct pcs *pcsp, *primary_pcsp;
718 	struct pcb *pcb;
719 	u_long cpumask;
720 
721 	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
722 	primary_pcsp = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
723 	pcsp = LOCATE_PCS(hwrpb, ci->ci_cpuid);
724 	cpumask = (1UL << ci->ci_cpuid);
725 
726 	/*
727 	 * Set up the PCS's HWPCB to match ours.
728 	 */
729 	memcpy(pcsp->pcs_hwpcb, &pcb->pcb_hw, sizeof(pcb->pcb_hw));
730 
731 	/*
732 	 * Set up the HWRPB to restart the secondary processor
733 	 * with our spin-up trampoline.
734 	 */
735 	hwrpb->rpb_restart = (uint64_t) cpu_spinup_trampoline;
736 	hwrpb->rpb_restart_val = (uint64_t) ci;
737 	hwrpb->rpb_checksum = hwrpb_checksum();
738 
739 	/*
740 	 * Configure the CPU to start in OSF/1 PALcode by copying
741 	 * the primary CPU's PALcode revision info to the secondary
742 	 * CPUs PCS.
743 	 * CPU's PCS.
744 	memcpy(&pcsp->pcs_pal_rev, &primary_pcsp->pcs_pal_rev,
745 	    sizeof(pcsp->pcs_pal_rev));
746 	pcsp->pcs_flags |= (PCS_CV|PCS_RC);
747 	pcsp->pcs_flags &= ~PCS_BIP;
748 
749 	/* Make sure the secondary console sees all this. */
750 	alpha_mb();
751 
752 	/* Send a "START" command to the secondary CPU's console. */
753 	if (cpu_iccb_send(ci->ci_cpuid, "START\r\n")) {
754 		aprint_error_dev(ci->ci_softc->sc_dev,
755 		    "unable to issue `START' command\n");
756 		return;
757 	}
758 
759 	/* Wait for the processor to boot. */
760 	for (timeout = 10000; timeout != 0; timeout--) {
761 		alpha_mb();
762 		if (pcsp->pcs_flags & PCS_BIP)
763 			break;
764 		delay(1000);
765 	}
766 	if (timeout == 0)
767 		aprint_error_dev(ci->ci_softc->sc_dev,
768 		    "processor failed to boot\n");
769 
770 	/*
771 	 * ...and now wait for verification that it's running kernel
772 	 * code.
773 	 */
774 	for (timeout = 10000; timeout != 0; timeout--) {
775 		alpha_mb();
776 		if (cpus_booted & cpumask)
777 			break;
778 		delay(1000);
779 	}
780 	if (timeout == 0)
781 		aprint_error_dev(ci->ci_softc->sc_dev,
782 		    "processor failed to hatch\n");
783 }
784 
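/*
 * cpu_pause_resume:
 *
 *	Pause the given CPU (by sending it an ALPHA_IPI_PAUSE IPI)
 *	or resume it by clearing its bit in the paused mask.
 */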
785 void
786 cpu_pause_resume(u_long cpu_id, int pause)
787 {
788 	u_long cpu_mask = (1UL << cpu_id);
789 
790 	if (pause) {
791 		atomic_or_ulong(&cpus_paused, cpu_mask);
792 		alpha_send_ipi(cpu_id, ALPHA_IPI_PAUSE);
793 	} else
794 		atomic_and_ulong(&cpus_paused, ~cpu_mask);
795 }
796 
797 void
798 cpu_pause_resume_all(int pause)
799 {
800 	struct cpu_info *ci, *self = curcpu();
801 	CPU_INFO_ITERATOR cii;
802 
803 	for (CPU_INFO_FOREACH(cii, ci)) {
804 		if (ci == self)
805 			continue;
806 		cpu_pause_resume(ci->ci_cpuid, pause);
807 	}
808 }
809 
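/*
 * cpu_halt:
 *
 *	Shut down the current CPU: mark it to stay halted in its PCS,
 *	remove it from the booted and running masks, and halt via
 *	PALcode.
 */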
810 void
811 cpu_halt(void)
812 {
813 	struct cpu_info *ci = curcpu();
814 	u_long cpu_id = cpu_number();
815 	struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id);
816 
817 	aprint_normal_dev(ci->ci_softc->sc_dev, "shutting down...\n");
818 
819 	pcsp->pcs_flags &= ~(PCS_RC | PCS_HALT_REQ);
820 	pcsp->pcs_flags |= PCS_HALT_STAY_HALTED;
821 
822 	atomic_and_ulong(&cpus_running, ~(1UL << cpu_id));
823 	atomic_and_ulong(&cpus_booted, ~(1UL << cpu_id));
824 
825 	alpha_pal_halt();
826 	/* NOTREACHED */
827 }
828 
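/*
 * cpu_hatch:
 *
 *	First kernel code run on a secondary CPU, called from
 *	cpu_spinup_trampoline(): initialize pmap state and trap
 *	vectors, announce extensions, mark the CPU booted, then spin
 *	until cpu_boot_secondary_processors() marks it running.
 */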
829 void
830 cpu_hatch(struct cpu_info *ci)
831 {
832 	u_long cpu_id = cpu_number();
833 	u_long cpumask = (1UL << cpu_id);
834 
835 	/* pmap initialization for this processor. */
836 	pmap_init_cpu(ci);
837 
838 	/* Initialize trap vectors for this processor. */
839 	trap_init();
840 
841 	/* Yahoo!  We're running kernel code!  Announce it! */
842 	cpu_announce_extensions(ci);
843 
844 	atomic_or_ulong(&cpus_booted, cpumask);
845 
846 	/*
847 	 * Spin here until we're told we can start.
848 	 */
849 	while ((cpus_running & cpumask) == 0)
850 		/* spin */ ;
851 
852 	/*
853 	 * Invalidate the TLB and sync the I-stream before we
854 	 * jump into the kernel proper.  We have to do this
855 	 * because we haven't been getting IPIs while we've
856 	 * been spinning.
857 	 */
858 	ALPHA_TBIA();
859 	alpha_pal_imb();
860 
861 	if (alpha_use_cctr) {
862 		cc_init_secondary(ci);
863 	}
864 
865 	cpu_initclocks_secondary();
866 }
867 
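/*
 * cpu_iccb_send:
 *
 *	Send a command string to the specified CPU's console via its
 *	inter-console communications buffer (ICCB), waiting for the
 *	buffer to become free and for the message to be consumed.
 */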
868 int
869 cpu_iccb_send(long cpu_id, const char *msg)
870 {
871 	struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id);
872 	int timeout;
873 	u_long cpumask = (1UL << cpu_id);
874 
875 	/* Wait for the ICCB to become available. */
876 	for (timeout = 10000; timeout != 0; timeout--) {
877 		alpha_mb();
878 		if ((hwrpb->rpb_rxrdy & cpumask) == 0)
879 			break;
880 		delay(1000);
881 	}
882 	if (timeout == 0)
883 		return (EIO);
884 
885 	/*
886 	 * Copy the message into the ICCB, and tell the secondary console
887 	 * that it's there.  Ensure the buffer is initialized before we
888 	 * set the rxrdy bits, as a store-release.
889 	 */
890 	strcpy(pcsp->pcs_iccb.iccb_rxbuf, msg);
891 	pcsp->pcs_iccb.iccb_rxlen = strlen(msg);
892 	membar_release();
893 	atomic_or_ulong(&hwrpb->rpb_rxrdy, cpumask);
894 
895 	/* Wait for the message to be received. */
896 	for (timeout = 10000; timeout != 0; timeout--) {
897 		alpha_mb();
898 		if ((hwrpb->rpb_rxrdy & cpumask) == 0)
899 			break;
900 		delay(1000);
901 	}
902 	if (timeout == 0)
903 		return (EIO);
904 
905 	return (0);
906 }
907 
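/*
 * cpu_iccb_receive:
 *
 *	Acknowledge inter-console messages directed at us; the message
 *	contents are currently ignored (see the #if 0 block).
 */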
908 void
909 cpu_iccb_receive(void)
910 {
911 #if 0	/* Don't bother... we don't get any important messages anyhow. */
912 	uint64_t txrdy;
913 	char *cp1, *cp2, buf[80];
914 	struct pcs *pcsp;
915 	u_int cnt;
916 	long cpu_id;
917 
918 	txrdy = hwrpb->rpb_txrdy;
919 
920 	for (cpu_id = 0; cpu_id < hwrpb->rpb_pcs_cnt; cpu_id++) {
921 		if (txrdy & (1UL << cpu_id)) {
922 			pcsp = LOCATE_PCS(hwrpb, cpu_id);
923 			printf("Inter-console message from CPU %lu "
924 			    "HALT REASON = 0x%lx, FLAGS = 0x%lx\n",
925 			    cpu_id, pcsp->pcs_halt_reason, pcsp->pcs_flags);
926 
927 			cnt = pcsp->pcs_iccb.iccb_txlen;
928 			if (cnt >= 80) {
929 				printf("Malformed inter-console message\n");
930 				continue;
931 			}
932 			cp1 = pcsp->pcs_iccb.iccb_txbuf;
933 			cp2 = buf;
934 			while (cnt--) {
935 				if (*cp1 != '\r' && *cp1 != '\n')
936 					*cp2++ = *cp1;
937 				cp1++;
938 			}
939 			*cp2 = '\0';
940 			printf("Message from CPU %lu: %s\n", cpu_id, buf);
941 		}
942 	}
943 #endif /* 0 */
944 	hwrpb->rpb_txrdy = 0;
945 	alpha_mb();
946 }
947 
948 #if defined(DDB)
949 
950 #include <ddb/db_output.h>
951 #include <machine/db_machdep.h>
952 
953 /*
954  * Dump CPU information from DDB.
955  */
956 void
957 cpu_debug_dump(void)
958 {
959 	struct cpu_info *ci;
960 	CPU_INFO_ITERATOR cii;
961 
962 	db_printf("addr		dev	id	flags	ipis	curproc\n");
963 	for (CPU_INFO_FOREACH(cii, ci)) {
964 		db_printf("%p	%s	%lu	%lx	%lx	%p\n",
965 		    ci,
966 		    device_xname(ci->ci_softc->sc_dev),
967 		    ci->ci_cpuid,
968 		    ci->ci_flags,
969 		    ci->ci_ipis,
970 		    ci->ci_curlwp);
971 	}
972 }
973 
974 #endif /* DDB */
975 
976 #endif /* MULTIPROCESSOR */
977