1 /* $NetBSD: powerpc_machdep.c,v 1.86 2022/05/30 14:48:08 rin Exp $ */
2
3 /*
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: powerpc_machdep.c,v 1.86 2022/05/30 14:48:08 rin Exp $");
36
37 #ifdef _KERNEL_OPT
38 #include "opt_altivec.h"
39 #include "opt_ddb.h"
40 #include "opt_modular.h"
41 #include "opt_multiprocessor.h"
42 #include "opt_ppcarch.h"
43 #include "opt_ppcopts.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/conf.h>
48 #include <sys/disklabel.h>
49 #include <sys/exec.h>
50 #include <sys/kauth.h>
51 #include <sys/pool.h>
52 #include <sys/proc.h>
53 #include <sys/signal.h>
54 #include <sys/sysctl.h>
55 #include <sys/ucontext.h>
56 #include <sys/cpu.h>
57 #include <sys/module.h>
58 #include <sys/device.h>
59 #include <sys/pcu.h>
60 #include <sys/atomic.h>
61 #include <sys/kmem.h>
62 #include <sys/xcall.h>
63 #include <sys/ipi.h>
64
65 #include <dev/mm.h>
66
67 #include <powerpc/fpu.h>
68 #include <powerpc/pcb.h>
69 #include <powerpc/psl.h>
70 #include <powerpc/userret.h>
71 #if defined(ALTIVEC) || defined(PPC_HAVE_SPE)
72 #include <powerpc/altivec.h>
73 #endif
74
75 #ifdef MULTIPROCESSOR
76 #include <powerpc/pic/ipivar.h>
77 #include <machine/cpu_counter.h>
78 #endif
79
80 #ifdef DDB
81 #include <machine/db_machdep.h>
82 #include <ddb/db_output.h>
83 #endif
84
85 int cpu_timebase;
86 int cpu_printfataltraps = 1;
87 #if !defined(PPC_IBM4XX)
88 extern int powersave;
89 #endif
90
91 /* exported variable to be filled in by the bootloaders */
92 char *booted_kernel;
93
94 const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
95 [PCU_FPU] = &fpu_ops,
96 #if defined(ALTIVEC) || defined(PPC_HAVE_SPE)
97 [PCU_VEC] = &vec_ops,
98 #endif
99 };
100
101 #ifdef MULTIPROCESSOR
102 struct cpuset_info cpuset_info;
103 #endif
104
105 /*
106 * Set up registers on exec.
107 */
108 void
109 setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
110 {
111 struct proc * const p = l->l_proc;
112 struct trapframe * const tf = l->l_md.md_utf;
113 struct pcb * const pcb = lwp_getpcb(l);
114 struct ps_strings arginfo;
115 vaddr_t func = epp->ep_entry;
116
117 memset(tf, 0, sizeof *tf);
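	/*
	 * Align the initial user stack pointer: reserve at least 8 bytes
	 * above it and round it down to a 16-byte boundary (in modular
	 * arithmetic, -roundup(-x, 16) == rounddown(x, 16)).
	 */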
118 tf->tf_fixreg[1] = -roundup(-stack + 8, 16);
119
120 /*
121 * XXX Machine-independent code has already copied arguments and
122 * XXX environment to userland. Get them back here.
123 */
124 (void)copyin_psstrings(p, &arginfo);
125
126 /*
127 * Set up arguments for _start():
128 * _start(argc, argv, envp, obj, cleanup, ps_strings);
129 *
130 * Notes:
131 * - obj and cleanup are the auxiliary and termination
132 * vectors. They are fixed up by ld.elf_so.
133 * - ps_strings is a NetBSD extension, and will be
134 * ignored by executables which are strictly
135 * compliant with the SVR4 ABI.
136 *
137 * XXX We have to set both regs and retval here due to different
138 * XXX calling convention in trap.c and init_main.c.
139 */
140 tf->tf_fixreg[3] = arginfo.ps_nargvstr;
141 tf->tf_fixreg[4] = (register_t)arginfo.ps_argvstr;
142 tf->tf_fixreg[5] = (register_t)arginfo.ps_envstr;
143 tf->tf_fixreg[6] = 0; /* auxiliary vector */
144 tf->tf_fixreg[7] = 0; /* termination vector */
145 tf->tf_fixreg[8] = p->p_psstrp; /* NetBSD extension */
146
147 #ifdef _LP64
148 /*
149 * For native ELF64, entry point to the function
150 * descriptor which contains the real function address
151 * and its TOC base address.
152 */
153 uintptr_t fdesc[3] = { [0] = func, [1] = 0, [2] = 0 };
154 copyin((void *)func, fdesc, sizeof(fdesc));
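	/*
	 * Note: if the copyin() fails, fdesc keeps its initializer, so
	 * srr0 simply falls back to the raw entry address with a zero
	 * TOC pointer.
	 */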
155 tf->tf_fixreg[2] = fdesc[1] + epp->ep_entryoffset;
156 func = fdesc[0] + epp->ep_entryoffset;
157 #endif
158 tf->tf_srr0 = func;
159 tf->tf_srr1 = PSL_MBO | PSL_USERSET;
160 #ifdef ALTIVEC
161 tf->tf_vrsave = 0;
162 #endif
163 pcb->pcb_flags = PSL_FE_DFLT;
164
165 #if defined(PPC_BOOKE) || defined(PPC_IBM4XX)
166 p->p_md.md_ss_addr[0] = p->p_md.md_ss_addr[1] = 0;
167 p->p_md.md_ss_insn[0] = p->p_md.md_ss_insn[1] = 0;
168 #endif
169 }
170
171 /*
172 * Machine dependent system variables.
173 */
174 static int
175 sysctl_machdep_cacheinfo(SYSCTLFN_ARGS)
176 {
177 struct sysctlnode node = *rnode;
178
179 node.sysctl_data = &curcpu()->ci_ci;
180 node.sysctl_size = sizeof(curcpu()->ci_ci);
181 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
182 }
183
184 #if !defined (PPC_IBM4XX)
185 static int
186 sysctl_machdep_powersave(SYSCTLFN_ARGS)
187 {
188 struct sysctlnode node = *rnode;
189
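	/*
	 * A negative powersave value means the CPU has no usable
	 * power-saving mode, so expose the node read-only.
	 */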
190 if (powersave < 0)
191 node.sysctl_flags &= ~CTLFLAG_READWRITE;
192 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
193 }
194 #endif
195
196 static int
197 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
198 {
199 struct sysctlnode node;
200
201 if (booted_device == NULL)
202 return (EOPNOTSUPP);
203
204 const char * const xname = device_xname(booted_device);
205
206 node = *rnode;
207 node.sysctl_data = __UNCONST(xname);
208 node.sysctl_size = strlen(xname) + 1;
209 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
210 }
211
212 static int
213 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
214 {
215 struct sysctlnode node;
216
217 if (booted_kernel == NULL || booted_kernel[0] == '\0')
218 return (EOPNOTSUPP);
219
220 node = *rnode;
221 node.sysctl_data = booted_kernel;
222 node.sysctl_size = strlen(booted_kernel) + 1;
223 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
224 }
225
226 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
227 {
228
229 sysctl_createv(clog, 0, NULL, NULL,
230 CTLFLAG_PERMANENT,
231 CTLTYPE_NODE, "machdep", NULL,
232 NULL, 0, NULL, 0,
233 CTL_MACHDEP, CTL_EOL);
234
235 /* Deprecated */
236 sysctl_createv(clog, 0, NULL, NULL,
237 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
238 CTLTYPE_INT, "cachelinesize", NULL,
239 NULL, curcpu()->ci_ci.dcache_line_size, NULL, 0,
240 CTL_MACHDEP, CPU_CACHELINE, CTL_EOL);
241 sysctl_createv(clog, 0, NULL, NULL,
242 CTLFLAG_PERMANENT,
243 CTLTYPE_INT, "timebase", NULL,
244 NULL, 0, &cpu_timebase, 0,
245 CTL_MACHDEP, CPU_TIMEBASE, CTL_EOL);
246 sysctl_createv(clog, 0, NULL, NULL,
247 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
248 CTLTYPE_INT, "printfataltraps", NULL,
249 NULL, 0, &cpu_printfataltraps, 0,
250 CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL);
251 /* Use this instead of CPU_CACHELINE */
252 sysctl_createv(clog, 0, NULL, NULL,
253 CTLFLAG_PERMANENT,
254 CTLTYPE_STRUCT, "cacheinfo", NULL,
255 sysctl_machdep_cacheinfo, 0, NULL, 0,
256 CTL_MACHDEP, CPU_CACHEINFO, CTL_EOL);
257 #if !defined (PPC_IBM4XX)
258 sysctl_createv(clog, 0, NULL, NULL,
259 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
260 CTLTYPE_INT, "powersave", NULL,
261 sysctl_machdep_powersave, 0, &powersave, 0,
262 CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
263 #endif
264 #if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
265 sysctl_createv(clog, 0, NULL, NULL,
266 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
267 CTLTYPE_INT, "altivec", NULL,
268 NULL, 0, NULL, 0,
269 CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL);
270 #else
271 sysctl_createv(clog, 0, NULL, NULL,
272 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
273 CTLTYPE_INT, "altivec", NULL,
274 NULL, cpu_altivec, NULL, 0,
275 CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL);
276 #endif
277 #ifdef PPC_BOOKE
278 sysctl_createv(clog, 0, NULL, NULL,
279 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
280 CTLTYPE_INT, "execprot", NULL,
281 NULL, 1, NULL, 0,
282 CTL_MACHDEP, CPU_EXECPROT, CTL_EOL);
283 #endif
284 sysctl_createv(clog, 0, NULL, NULL,
285 CTLFLAG_PERMANENT,
286 CTLTYPE_STRING, "booted_device", NULL,
287 sysctl_machdep_booted_device, 0, NULL, 0,
288 CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
289 sysctl_createv(clog, 0, NULL, NULL,
290 CTLFLAG_PERMANENT,
291 CTLTYPE_STRING, "booted_kernel", NULL,
292 sysctl_machdep_booted_kernel, 0, NULL, 0,
293 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
294 sysctl_createv(clog, 0, NULL, NULL,
295 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
296 CTLTYPE_INT, "fpu_present", NULL,
297 NULL,
298 #if defined(PPC_HAVE_FPU)
299 1,
300 #else
301 0,
302 #endif
303 NULL, 0,
304 CTL_MACHDEP, CPU_FPU, CTL_EOL);
305 sysctl_createv(clog, 0, NULL, NULL,
306 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
307 CTLTYPE_INT, "no_unaligned", NULL,
308 NULL,
309 #if defined(PPC_NO_UNALIGNED)
310 1,
311 #else
312 0,
313 #endif
314 NULL, 0,
315 CTL_MACHDEP, CPU_NO_UNALIGNED, CTL_EOL);
316 }
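/*
 * Userland sketch (not part of this file): the nodes created above can be
 * read with sysctlbyname(3).  Assuming the struct cacheinfo definition from
 * <machine/cpu.h> is visible, something like the following would report the
 * D-cache line size exported as machdep.cacheinfo:
 *
 *	struct cacheinfo ci;
 *	size_t len = sizeof(ci);
 *	if (sysctlbyname("machdep.cacheinfo", &ci, &len, NULL, 0) == 0)
 *		printf("dcache line size: %d\n", ci.dcache_line_size);
 */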
317
318 /*
319 * Crash dump handling.
320 */
321 u_int32_t dumpmag = 0x8fca0101; /* magic number */
322 int dumpsize = 0; /* size of dump in pages */
323 long dumplo = -1; /* blocks */
324
325 /*
326 * This is called by main to set dumplo and dumpsize.
327 */
328 void
329 cpu_dumpconf(void)
330 {
331 int nblks; /* size of dump device */
332 int skip;
333
334 if (dumpdev == NODEV)
335 return;
336 nblks = bdev_size(dumpdev);
337 if (nblks <= ctod(1))
338 return;
339
340 dumpsize = physmem;
341
342 /* Skip enough blocks at start of disk to preserve an eventual disklabel. */
343 skip = LABELSECTOR + 1;
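	/*
	 * Round the skipped area up to a whole page worth of disk blocks:
	 * adding ctod(1) - 1 makes the truncating dtoc()/ctod() pair below
	 * round up rather than down.
	 */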
344 skip += ctod(1) - 1;
345 skip = ctod(dtoc(skip));
346 if (dumplo < skip)
347 dumplo = skip;
348
349 /* Put dump at end of partition */
350 if (dumpsize > dtoc(nblks - dumplo))
351 dumpsize = dtoc(nblks - dumplo);
352 if (dumplo < nblks - ctod(dumpsize))
353 dumplo = nblks - ctod(dumpsize);
354 }
355
356 /*
357 * Start a new LWP
358 */
359 void
360 startlwp(void *arg)
361 {
362 ucontext_t * const uc = arg;
363 lwp_t * const l = curlwp;
364 struct trapframe * const tf = l->l_md.md_utf;
365 int error __diagused;
366
367 error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
368 KASSERT(error == 0);
369
370 kmem_free(uc, sizeof(ucontext_t));
371 userret(l, tf);
372 }
373
374 /*
375 * Process the tail end of a posix_spawn() for the child.
376 */
377 void
378 cpu_spawn_return(struct lwp *l)
379 {
380 struct trapframe * const tf = l->l_md.md_utf;
381
382 userret(l, tf);
383 }
384
385 bool
386 cpu_intr_p(void)
387 {
388
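	/*
	 * ci_idepth is -1 at base (thread) level and counts interrupt
	 * nesting from 0 upwards, so any non-negative depth means we are
	 * servicing an interrupt.
	 */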
389 return curcpu()->ci_idepth >= 0;
390 }
391
392 void
393 cpu_idle(void)
394 {
395 KASSERT(mfmsr() & PSL_EE);
396 KASSERTMSG(curcpu()->ci_cpl == IPL_NONE,
397 "ci_cpl = %d", curcpu()->ci_cpl);
398 (*curcpu()->ci_idlespin)();
399 }
400
401 void
402 cpu_ast(struct lwp *l, struct cpu_info *ci)
403 {
404 l->l_md.md_astpending = 0; /* we are about to do it */
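	/* Charge any profiling tick noted earlier by cpu_need_proftick(). */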
405 if (l->l_pflag & LP_OWEUPC) {
406 l->l_pflag &= ~LP_OWEUPC;
407 ADDUPROF(l);
408 }
409 }
410
411 void
412 cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
413 {
414 KASSERT(kpreempt_disabled());
415
416 #ifdef __HAVE_PREEMPTION
417 if ((flags & RESCHED_KPREEMPT) != 0) {
418 if ((flags & RESCHED_REMOTE) != 0) {
419 cpu_send_ipi(cpu_index(ci), IPI_KPREEMPT);
420 } else {
421 softint_trigger(SOFTINT_KPREEMPT);
422 }
423 return;
424 }
425 #endif
426 if ((flags & RESCHED_REMOTE) != 0) {
427 #if defined(MULTIPROCESSOR)
428 cpu_send_ipi(cpu_index(ci), IPI_AST);
429 #endif
430 } else {
431 l->l_md.md_astpending = 1; /* force call to cpu_ast() */
432 }
433 }
434
435 void
436 cpu_need_proftick(lwp_t *l)
437 {
438 l->l_pflag |= LP_OWEUPC;
439 l->l_md.md_astpending = 1;
440 }
441
442 void
443 cpu_signotify(lwp_t *l)
444 {
445 if (l->l_cpu != curcpu()) {
446 #if defined(MULTIPROCESSOR)
447 cpu_send_ipi(cpu_index(l->l_cpu), IPI_AST);
448 #endif
449 } else {
450 l->l_md.md_astpending = 1;
451 }
452 }
453
454 vaddr_t
455 cpu_lwp_pc(lwp_t *l)
456 {
457 return l->l_md.md_utf->tf_srr0;
458 }
459
460 bool
461 cpu_clkf_usermode(const struct clockframe *cf)
462 {
463 return (cf->cf_srr1 & PSL_PR) != 0;
464 }
465
466 vaddr_t
467 cpu_clkf_pc(const struct clockframe *cf)
468 {
469 return cf->cf_srr0;
470 }
471
472 bool
473 cpu_clkf_intr(const struct clockframe *cf)
474 {
475 return cf->cf_idepth > 0;
476 }
477
478 #ifdef MULTIPROCESSOR
479 /*
480 * MD support for xcall(9) interface.
481 */
482
483 void
484 xc_send_ipi(struct cpu_info *ci)
485 {
486 KASSERT(kpreempt_disabled());
487 KASSERT(curcpu() != ci);
488
489 cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME);
490
491 /* Unicast: remote CPU. */
492 /* Broadcast: all, but local CPU (caller will handle it). */
493 cpu_send_ipi(target, IPI_XCALL);
494 }
495
496 void
497 cpu_ipi(struct cpu_info *ci)
498 {
499 KASSERT(kpreempt_disabled());
500 KASSERT(curcpu() != ci);
501
502 cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME);
503
504 /* Unicast: remote CPU. */
505 /* Broadcast: all, but local CPU (caller will handle it). */
506 cpu_send_ipi(target, IPI_GENERIC);
507 }
508
509 /* XXX kcpuset_create(9) and kcpuset_clone(9) cannot be used in interrupt context */
510 typedef uint32_t __cpuset_t;
511 CTASSERT(MAXCPUS <= 32);
512
513 #define CPUSET_SINGLE(cpu) ((__cpuset_t)1 << (cpu))
514
515 #define CPUSET_ADD(set, cpu) atomic_or_32(&(set), CPUSET_SINGLE(cpu))
516 #define CPUSET_DEL(set, cpu) atomic_and_32(&(set), ~CPUSET_SINGLE(cpu))
517 #define CPUSET_SUB(set1, set2) atomic_and_32(&(set1), ~(set2))
518
519 #define CPUSET_EXCEPT(set, cpu) ((set) & ~CPUSET_SINGLE(cpu))
520
521 #define CPUSET_HAS_P(set, cpu) ((set) & CPUSET_SINGLE(cpu))
522 #define CPUSET_NEXT(set) (ffs(set) - 1)
523
524 #define CPUSET_EMPTY_P(set) ((set) == (__cpuset_t)0)
525 #define CPUSET_EQUAL_P(set1, set2) ((set1) == (set2))
526 #define CPUSET_CLEAR(set) ((set) = (__cpuset_t)0)
527 #define CPUSET_ASSIGN(set1, set2) ((set1) = (set2))
528
529 #define CPUSET_EXPORT(kset, set) kcpuset_export_u32((kset), &(set), sizeof(set))
530
531 /*
532 * Send an inter-processor interrupt to CPUs in cpuset (excludes curcpu())
533 */
534 static void
535 cpu_multicast_ipi(__cpuset_t cpuset, uint32_t msg)
536 {
537 CPU_INFO_ITERATOR cii;
538 struct cpu_info *ci;
539
540 CPUSET_DEL(cpuset, cpu_index(curcpu()));
541 if (CPUSET_EMPTY_P(cpuset))
542 return;
543
544 for (CPU_INFO_FOREACH(cii, ci)) {
545 const int index = cpu_index(ci);
546 if (CPUSET_HAS_P(cpuset, index)) {
547 CPUSET_DEL(cpuset, index);
548 cpu_send_ipi(index, msg);
549 }
550 }
551 }
552
553 static void
554 cpu_ipi_error(const char *s, kcpuset_t *succeeded, __cpuset_t expected)
555 {
556 __cpuset_t cpuset;
557
558 CPUSET_EXPORT(succeeded, cpuset);
559 CPUSET_SUB(expected, cpuset);
560 if (!CPUSET_EMPTY_P(expected)) {
561 printf("Failed to %s:", s);
562 do {
563 const int index = CPUSET_NEXT(expected);
564 CPUSET_DEL(expected, index);
565 printf(" cpu%d", index);
566 } while (!CPUSET_EMPTY_P(expected));
567 printf("\n");
568 }
569 }
570
571 static int
572 cpu_ipi_wait(kcpuset_t *watchset, __cpuset_t mask)
573 {
574 uint64_t tmout = curcpu()->ci_data.cpu_cc_freq; /* some finite amount of time */
575 __cpuset_t cpuset;
576
577 while (tmout--) {
578 CPUSET_EXPORT(watchset, cpuset);
579 if (cpuset == mask)
580 return 0; /* success */
581 }
582 return 1; /* timed out */
583 }
584
585 /*
586 * Halt this cpu.
587 */
588 void
589 cpu_halt(void)
590 {
591 struct cpuset_info * const csi = &cpuset_info;
592 const cpuid_t index = cpu_index(curcpu());
593
594 printf("cpu%ld: shutting down\n", index);
595 kcpuset_set(csi->cpus_halted, index);
596 spl0(); /* allow interrupts, e.g. further IPIs */
597
598 /* spin */
599 for (;;)
600 continue;
601 /*NOTREACHED*/
602 }
603
604 /*
605 * Halt all running cpus, excluding current cpu.
606 */
607 void
608 cpu_halt_others(void)
609 {
610 struct cpuset_info * const csi = &cpuset_info;
611 const cpuid_t index = cpu_index(curcpu());
612 __cpuset_t cpumask, cpuset, halted;
613
614 KASSERT(kpreempt_disabled());
615
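	/*
	 * cpumask: every other running CPU, i.e. the set we expect to see
	 * halted.  cpuset: those of them not already halted, i.e. the CPUs
	 * that still need to be sent an IPI.
	 */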
616 CPUSET_EXPORT(csi->cpus_running, cpuset);
617 CPUSET_DEL(cpuset, index);
618 CPUSET_ASSIGN(cpumask, cpuset);
619 CPUSET_EXPORT(csi->cpus_halted, halted);
620 CPUSET_SUB(cpuset, halted);
621
622 if (CPUSET_EMPTY_P(cpuset))
623 return;
624
625 cpu_multicast_ipi(cpuset, IPI_HALT);
626 if (cpu_ipi_wait(csi->cpus_halted, cpumask))
627 cpu_ipi_error("halt", csi->cpus_halted, cpumask);
628
629 /*
630 * TBD
631 * Depending on available firmware methods, other cpus will
632 * either shut down themselves, or spin and wait for us to
633 * stop them.
634 */
635 }
636
637 /*
638 * Pause this cpu.
639 */
640 void
641 cpu_pause(struct trapframe *tf)
642 {
643 volatile struct cpuset_info * const csi = &cpuset_info;
644 int s = splhigh();
645 const cpuid_t index = cpu_index(curcpu());
646
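	/*
	 * Spin until this CPU is resumed; while DDB is active anywhere,
	 * loop around and pause again so all CPUs stay stopped until the
	 * debugger exits.
	 */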
647 for (;;) {
648 kcpuset_set(csi->cpus_paused, index);
649 while (kcpuset_isset(csi->cpus_paused, index))
650 docritpollhooks();
651 kcpuset_set(csi->cpus_resumed, index);
652 #ifdef DDB
653 if (ddb_running_on_this_cpu_p())
654 cpu_Debugger();
655 if (ddb_running_on_any_cpu_p())
656 continue;
657 #endif /* DDB */
658 break;
659 }
660
661 splx(s);
662 }
663
664 /*
665 * Pause all running cpus, excluding current cpu.
666 */
667 void
668 cpu_pause_others(void)
669 {
670 struct cpuset_info * const csi = &cpuset_info;
671 const cpuid_t index = cpu_index(curcpu());
672 __cpuset_t cpuset;
673
674 KASSERT(kpreempt_disabled());
675
676 CPUSET_EXPORT(csi->cpus_running, cpuset);
677 CPUSET_DEL(cpuset, index);
678
679 if (CPUSET_EMPTY_P(cpuset))
680 return;
681
682 cpu_multicast_ipi(cpuset, IPI_SUSPEND);
683 if (cpu_ipi_wait(csi->cpus_paused, cpuset))
684 cpu_ipi_error("pause", csi->cpus_paused, cpuset);
685 }
686
687 /*
688 * Resume a single cpu.
689 */
690 void
691 cpu_resume(cpuid_t index)
692 {
693 struct cpuset_info * const csi = &cpuset_info;
694 __cpuset_t cpuset = CPUSET_SINGLE(index);
695
696 kcpuset_zero(csi->cpus_resumed);
697 kcpuset_clear(csi->cpus_paused, index);
698
699 if (cpu_ipi_wait(csi->cpus_paused, cpuset))
700 cpu_ipi_error("resume", csi->cpus_resumed, cpuset);
701 }
702
703 /*
704 * Resume all paused cpus.
705 */
706 void
707 cpu_resume_others(void)
708 {
709 struct cpuset_info * const csi = &cpuset_info;
710 __cpuset_t cpuset;
711
712 kcpuset_zero(csi->cpus_resumed);
713 CPUSET_EXPORT(csi->cpus_paused, cpuset);
714 kcpuset_zero(csi->cpus_paused);
715
716 if (cpu_ipi_wait(csi->cpus_resumed, cpuset))
717 cpu_ipi_error("resume", csi->cpus_resumed, cpuset);
718 }
719
720 int
721 cpu_is_paused(int index)
722 {
723 struct cpuset_info * const csi = &cpuset_info;
724
725 return kcpuset_isset(csi->cpus_paused, index);
726 }
727
728 #ifdef DDB
729 void
730 cpu_debug_dump(void)
731 {
732 struct cpuset_info * const csi = &cpuset_info;
733 CPU_INFO_ITERATOR cii;
734 struct cpu_info *ci;
735 char running, hatched, paused, resumed, halted;
736
737 #ifdef _LP64
738 db_printf("CPU CPUID STATE CPUINFO CPL INT MTX IPIS\n");
739 #else
740 db_printf("CPU CPUID STATE CPUINFO CPL INT MTX IPIS\n");
741 #endif
742 for (CPU_INFO_FOREACH(cii, ci)) {
743 const cpuid_t index = cpu_index(ci);
744 hatched = (kcpuset_isset(csi->cpus_hatched, index) ? 'H' : '-');
745 running = (kcpuset_isset(csi->cpus_running, index) ? 'R' : '-');
746 paused = (kcpuset_isset(csi->cpus_paused, index) ? 'P' : '-');
747 resumed = (kcpuset_isset(csi->cpus_resumed, index) ? 'r' : '-');
748 halted = (kcpuset_isset(csi->cpus_halted, index) ? 'h' : '-');
749 db_printf("%3ld 0x%03x %c%c%c%c%c %p %3d %3d %3d 0x%08x\n",
750 index, ci->ci_cpuid,
751 running, hatched, paused, resumed, halted,
752 ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
753 ci->ci_pending_ipis);
754 }
755 }
756 #endif /* DDB */
757 #endif /* MULTIPROCESSOR */
758
759 int
760 emulate_mxmsr(struct lwp *l, struct trapframe *tf, uint32_t opcode)
761 {
762
763 #define OPC_MFMSR_CODE 0x7c0000a6
764 #define OPC_MFMSR_MASK 0xfc1fffff
765 #define OPC_MFMSR_P(o) (((o) & OPC_MFMSR_MASK) == OPC_MFMSR_CODE)
766
767 #define OPC_MTMSR_CODE 0x7c000124
768 #define OPC_MTMSR_MASK 0xfc1fffff
769 #define OPC_MTMSR_P(o) (((o) & OPC_MTMSR_MASK) == OPC_MTMSR_CODE)
770
771 #define OPC_MXMSR_REG(o) (((o) >> 21) & 0x1f)
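/*
 * Both mfmsr and mtmsr carry the GPR number (the RT/RS field) in the same
 * five-bit field of the instruction word, hence the shared extractor above.
 */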
772
773 if (OPC_MFMSR_P(opcode)) {
774 struct pcb * const pcb = lwp_getpcb(l);
775 register_t msr = tf->tf_srr1 & PSL_USERSRR1;
776
777 if (fpu_used_p(l))
778 msr |= PSL_FP;
779 #ifdef ALTIVEC
780 if (vec_used_p(l))
781 msr |= PSL_VEC;
782 #endif
783
784 msr |= (pcb->pcb_flags & PSL_FE_PREC);
785 tf->tf_fixreg[OPC_MXMSR_REG(opcode)] = msr;
786 return 1;
787 }
788
789 if (OPC_MTMSR_P(opcode)) {
790 struct pcb * const pcb = lwp_getpcb(l);
791 register_t msr = tf->tf_fixreg[OPC_MXMSR_REG(opcode)];
792
793 /*
794 * Ignore the FP enable bit in the requested MSR.
795 * It might be set in the thread's actual MSR but the
796 * user code isn't allowed to change it.
797 */
798 msr &= ~PSL_FP;
799 #ifdef ALTIVEC
800 msr &= ~PSL_VEC;
801 #endif
802
803 /*
804 * Don't let the user muck with bits he's not allowed to.
805 */
806 #ifdef PPC_HAVE_FPU
807 if (!PSL_USEROK_P(msr))
808 #else
809 if (!PSL_USEROK_P(msr & ~PSL_FE_PREC))
810 #endif
811 return 0;
812
813 /*
814 * For now, only update the FP exception mode.
815 */
816 pcb->pcb_flags &= ~PSL_FE_PREC;
817 pcb->pcb_flags |= msr & PSL_FE_PREC;
818
819 #ifdef PPC_HAVE_FPU
820 /*
821 * If we think we have the FPU, update SRR1 too. If we're
822 * wrong userret() will take care of it.
823 */
824 if (tf->tf_srr1 & PSL_FP) {
825 tf->tf_srr1 &= ~(PSL_FE0|PSL_FE1);
826 tf->tf_srr1 |= msr & (PSL_FE0|PSL_FE1);
827 }
828 #endif
829 return 1;
830 }
831
832 return 0;
833 }
834
835 #if defined(MODULAR) && !defined(__PPC_HAVE_MODULE_INIT_MD)
836 /*
837 * Push any modules loaded by the boot loader.
838 */
839 void
840 module_init_md(void)
841 {
842 }
843 #endif
844
845 bool
846 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
847 {
848 if (atop(pa) < physmem) {
849 *vap = pa;
850 return true;
851 }
852
853 return false;
854 }
855
856 int
857 mm_md_physacc(paddr_t pa, vm_prot_t prot)
858 {
859
860 return (atop(pa) < physmem) ? 0 : EFAULT;
861 }
862
863 int
864 mm_md_kernacc(void *va, vm_prot_t prot, bool *handled)
865 {
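	/*
	 * Low physical memory is direct-mapped (VA == PA, see
	 * mm_md_direct_mapped_phys() above), so addresses below the end of
	 * physical memory need no further access checks.
	 */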
866 if (atop((paddr_t)va) < physmem) {
867 *handled = true;
868 return 0;
869 }
870
871 if ((vaddr_t)va < VM_MIN_KERNEL_ADDRESS
872 || (vaddr_t)va >= VM_MAX_KERNEL_ADDRESS)
873 return EFAULT;
874
875 *handled = false;
876 return 0;
877 }
878