xref: /netbsd-src/sys/arch/mips/mips/cpu_subr.c (revision a355028fa4f1eaedfae402807330fbc7bf5e9102)
1 /*	$NetBSD: cpu_subr.c,v 1.64 2023/10/04 20:28:05 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010, 2019, 2023 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.64 2023/10/04 20:28:05 ad Exp $");
34 
35 #include "opt_cputype.h"
36 #include "opt_ddb.h"
37 #include "opt_modular.h"
38 #include "opt_multiprocessor.h"
39 
40 #include <sys/param.h>
41 #include <sys/atomic.h>
42 #include <sys/bitops.h>
43 #include <sys/cpu.h>
44 #include <sys/device.h>
45 #include <sys/device_impl.h>	/* XXX autoconf abuse */
46 #include <sys/idle.h>
47 #include <sys/intr.h>
48 #include <sys/ipi.h>
49 #include <sys/kernel.h>
50 #include <sys/lwp.h>
51 #include <sys/module.h>
52 #include <sys/proc.h>
53 #include <sys/ras.h>
54 #include <sys/reboot.h>
55 #include <sys/xcall.h>
56 
57 #include <uvm/uvm.h>
58 
59 #include <mips/locore.h>
60 #include <mips/regnum.h>
61 #include <mips/pcb.h>
62 #include <mips/cache.h>
63 #include <mips/frame.h>
64 #include <mips/userret.h>
65 #include <mips/pte.h>
66 
67 #if defined(DDB) || defined(KGDB)
68 #ifdef DDB
69 #include <mips/db_machdep.h>
70 #include <ddb/db_command.h>
71 #include <ddb/db_output.h>
72 #endif
73 #endif
74 
75 #ifdef MIPS64_OCTEON
76 #include <mips/cavium/octeonvar.h>
77 extern struct cpu_softc octeon_cpu_softc[];
78 #endif
79 
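/*
 * cpu_info for the primary CPU.  On MULTIPROCESSOR (non-Octeon) kernels
 * it is placed in .data1 and aligned to the smallest power of two large
 * enough to hold a struct cpu_info, presumably so that the structure
 * never straddles the page wired into the TLB for register-zero
 * relative access (see cpu_hatch()).
 */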
80 struct cpu_info cpu_info_store
81 #if defined(MULTIPROCESSOR) && !defined(MIPS64_OCTEON)
82 	__section(".data1")
83 	__aligned(1LU << ilog2((2*sizeof(struct cpu_info)-1)))
84 #endif
85     = {
86 	.ci_curlwp = &lwp0,
87 	.ci_tlb_info = &pmap_tlb0_info,
88 	.ci_pmap_kern_segtab = &pmap_kern_segtab,
89 	.ci_pmap_user_segtab = NULL,
90 #ifdef _LP64
91 	.ci_pmap_user_seg0tab = NULL,
92 #endif
93 	.ci_cpl = IPL_HIGH,
94 	.ci_tlb_slot = -1,
95 #ifdef MULTIPROCESSOR
96 	.ci_flags = CPUF_PRIMARY|CPUF_PRESENT|CPUF_RUNNING,
97 #endif
98 #ifdef MIPS64_OCTEON
99 	.ci_softc = &octeon_cpu_softc[0],
100 #endif
101 };
102 
103 const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
104 	[PCU_FPU] = &mips_fpu_ops,
105 #if (MIPS32R2 + MIPS64R2) > 0
106 	[PCU_DSP] = &mips_dsp_ops,
107 #endif
108 };
109 
110 #ifdef MULTIPROCESSOR
111 struct cpu_info * cpuid_infos[MAXCPUS] = {
112 	[0] = &cpu_info_store,
113 };
114 
115 kcpuset_t *cpus_halted;
116 kcpuset_t *cpus_hatched;
117 kcpuset_t *cpus_paused;
118 kcpuset_t *cpus_resumed;
119 kcpuset_t *cpus_running;
120 
121 static void cpu_ipi_wait(const char *, const kcpuset_t *, const kcpuset_t *);
122 
123 struct cpu_info *
124 cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
125 	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
126 {
127 
128 	KASSERT(cpu_id < MAXCPUS);
129 
130 #ifdef MIPS64_OCTEON
131 	const int exc_step = 1 << MIPS_EBASE_EXC_BASE_SHIFT;
132 	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + exc_step * cpu_id;
133 	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info)
134 	    <= exc_step - 0x280);
135 
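	/*
	 * Each Octeon core has its own exception vector block; carve this
	 * CPU's cpu_info out of the top of that block (and, if no TLB info
	 * was supplied, a pmap_tlb_info immediately below it).
	 */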
136 	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + exc_step)) - 1;
137 	memset((void *)exc_page, 0, exc_step);
138 
139 	if (ti == NULL) {
140 		ti = ((struct pmap_tlb_info *)ci) - 1;
141 		pmap_tlb_info_init(ti);
142 	}
143 #else
144 	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
145 	struct pglist pglist;
146 	int error;
147 
148 	/*
149 	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
150 	 * exception vectors and cpu_info for this cpu.
151 	 */
152 	error = uvm_pglistalloc(PAGE_SIZE,
153 	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
154 	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
155 	if (error)
156 		return NULL;
157 
158 	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
159 	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
160 	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
161 	memset((void *)va, 0, PAGE_SIZE);
162 
163 	/*
164 	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
165 	 * to take care of that for him.  Since we have room left over in the
166 	 * page we just allocated, just use a piece of that for it.
167 	 */
168 	if (ti == NULL) {
169 		if (cpu_info_offset >= sizeof(*ti)) {
170 			ti = (void *) va;
171 		} else {
172 			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
173 			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
174 		}
175 		pmap_tlb_info_init(ti);
176 	}
177 
178 	/*
179 	 * Attach its TLB info (which must be direct-mapped)
180 	 */
181 #ifdef _LP64
182 	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
183 #else
184 	KASSERT(MIPS_KSEG0_P(ti));
185 #endif
186 #endif /* MIPS64_OCTEON */
187 
188 	KASSERT(cpu_id != 0);
189 	ci->ci_cpuid = cpu_id;
190 	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
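	/*
	 * Inherit the clock/delay calibration values and the watchpoint
	 * count already determined on the boot CPU.
	 */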
191 	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
192 	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
193 	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
194 	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
195 	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
196 	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;
197 
198 	cpu_topology_set(ci, cpu_package_id, cpu_core_id, cpu_smt_id, 0);
199 
200 	pmap_md_alloc_ephemeral_address_space(ci);
201 
202 	mi_cpu_attach(ci);
203 
204 	pmap_tlb_info_attach(ti, ci);
205 
206 	return ci;
207 }
208 #endif /* MULTIPROCESSOR */
209 
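/*
 * cpu_hwrena_setup:
 *
 *	On MIPS{32,64}R2 CPUs, let user mode read selected hardware
 *	registers via RDHWR: CPU number, SYNCI step, cycle counter and
 *	its resolution, and (if implemented) the UserLocal register,
 *	which is also primed with the current LWP's TLS pointer.
 */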
210 static void
211 cpu_hwrena_setup(void)
212 {
213 #if (MIPS32R2 + MIPS64R2) > 0
214 	const int cp0flags = mips_options.mips_cpu->cpu_cp0flags;
215 
216 	if ((cp0flags & MIPS_CP0FL_USE) == 0)
217 		return;
218 
219 	if (CPUISMIPSNNR2) {
220 		mipsNN_cp0_hwrena_write(
221 		    (MIPS_HAS_USERLOCAL ? MIPS_HWRENA_ULR : 0)
222 		    | MIPS_HWRENA_CCRES
223 		    | MIPS_HWRENA_CC
224 		    | MIPS_HWRENA_SYNCI_STEP
225 		    | MIPS_HWRENA_CPUNUM);
226 		if (MIPS_HAS_USERLOCAL) {
227 			mipsNN_cp0_userlocal_write(curlwp->l_private);
228 		}
229 	}
230 #endif
231 }
232 
233 void
234 cpu_attach_common(device_t self, struct cpu_info *ci)
235 {
236 	const char * const xname = device_xname(self);
237 
238 	/*
239 	 * Cross link cpu_info and its device together
240 	 *
241 	 * XXX autoconf abuse: Can't use device_set_private here
242 	 * because some callers already do so -- and some callers
243 	 * (sbmips cpu_attach) already have a softc allocated by
244 	 * autoconf.
245 	 */
246 	ci->ci_dev = self;
247 	self->dv_private = ci;
248 	KASSERT(ci->ci_idepth == 0);
249 
250 	evcnt_attach_dynamic(&ci->ci_ev_count_compare,
251 		EVCNT_TYPE_INTR, NULL, xname,
252 		"int 5 (clock)");
253 	evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed,
254 		EVCNT_TYPE_INTR, NULL, xname,
255 		"int 5 (clock) missed");
256 	evcnt_attach_dynamic(&ci->ci_ev_fpu_loads,
257 		EVCNT_TYPE_MISC, NULL, xname,
258 		"fpu loads");
259 	evcnt_attach_dynamic(&ci->ci_ev_fpu_saves,
260 		EVCNT_TYPE_MISC, NULL, xname,
261 		"fpu saves");
262 	evcnt_attach_dynamic(&ci->ci_ev_dsp_loads,
263 		EVCNT_TYPE_MISC, NULL, xname,
264 		"dsp loads");
265 	evcnt_attach_dynamic(&ci->ci_ev_dsp_saves,
266 		EVCNT_TYPE_MISC, NULL, xname,
267 		"dsp saves");
268 	evcnt_attach_dynamic(&ci->ci_ev_tlbmisses,
269 		EVCNT_TYPE_TRAP, NULL, xname,
270 		"tlb misses");
271 
272 #ifdef MULTIPROCESSOR
273 	if (ci != &cpu_info_store) {
274 		/*
275 		 * Tail insert this onto the list of cpu_info's.
276 		 * atomic_store_release matches PTR_L/SYNC_ACQ in
277 		 * locore_octeon.S (XXX what about non-Octeon?).
278 		 */
279 		KASSERT(cpuid_infos[ci->ci_cpuid] == NULL);
280 		atomic_store_release(&cpuid_infos[ci->ci_cpuid], ci);
281 		membar_producer(); /* Cavium sync plunger */
282 	}
283 	KASSERT(cpuid_infos[ci->ci_cpuid] != NULL);
284 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst,
285 	    EVCNT_TYPE_MISC, NULL, xname,
286 	    "syncicache activate request");
287 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst,
288 	    EVCNT_TYPE_MISC, NULL, xname,
289 	    "syncicache deferred request");
290 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst,
291 	    EVCNT_TYPE_MISC, NULL, xname,
292 	    "syncicache ipi request");
293 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst,
294 	    EVCNT_TYPE_MISC, NULL, xname,
295 	    "syncicache onproc request");
296 
297 	/*
298 	 * Initialize IPI framework for this cpu instance
299 	 */
300 	ipi_init(ci);
301 
302 	kcpuset_create(&ci->ci_shootdowncpus, true);
303 	kcpuset_create(&ci->ci_multicastcpus, true);
304 	kcpuset_create(&ci->ci_watchcpus, true);
305 	kcpuset_create(&ci->ci_ddbcpus, true);
306 #endif
307 }
308 
309 void
310 cpu_startup_common(void)
311 {
312 	vaddr_t minaddr, maxaddr;
313 	char pbuf[9];	/* "99999 MB" */
314 
315 	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
316 
317 #ifdef MULTIPROCESSOR
318 	kcpuset_create(&cpus_halted, true);
319 		KASSERT(cpus_halted != NULL);
320 	kcpuset_create(&cpus_hatched, true);
321 		KASSERT(cpus_hatched != NULL);
322 	kcpuset_create(&cpus_paused, true);
323 		KASSERT(cpus_paused != NULL);
324 	kcpuset_create(&cpus_resumed, true);
325 		KASSERT(cpus_resumed != NULL);
326 	kcpuset_create(&cpus_running, true);
327 		KASSERT(cpus_running != NULL);
328 	kcpuset_set(cpus_hatched, cpu_number());
329 	kcpuset_set(cpus_running, cpu_number());
330 #endif
331 
332 	cpu_hwrena_setup();
333 
334 	/*
335 	 * Good {morning,afternoon,evening,night}.
336 	 */
337 	printf("%s%s", copyright, version);
338 	printf("%s\n", cpu_getmodel());
339 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
340 	printf("total memory = %s\n", pbuf);
341 
342 	minaddr = 0;
343 	/*
344 	 * Allocate a submap for physio.
345 	 */
346 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
347 				    VM_PHYS_SIZE, 0, FALSE, NULL);
348 
349 	/*
350 	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
351 	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
352 	 * map those pages.)
353 	 */
354 
355 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
356 	printf("avail memory = %s\n", pbuf);
357 
358 #if defined(__mips_n32)
359 	module_machine = "mips-n32";
360 #endif
361 }
362 
363 void
364 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
365 {
366 	const struct trapframe *tf = l->l_md.md_utf;
367 	__greg_t *gr = mcp->__gregs;
368 	__greg_t ras_pc;
369 
370 	/* Save register context. Don't copy R0 - it is always 0 */
371 	memcpy(&gr[_REG_AT], &tf->tf_regs[_R_AST], sizeof(mips_reg_t) * 31);
372 
373 	gr[_REG_MDLO]  = tf->tf_regs[_R_MULLO];
374 	gr[_REG_MDHI]  = tf->tf_regs[_R_MULHI];
375 	gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE];
376 	gr[_REG_EPC]   = tf->tf_regs[_R_PC];
377 	gr[_REG_SR]    = tf->tf_regs[_R_SR];
378 	mcp->_mc_tlsbase = (intptr_t)l->l_private;
379 
380 	if ((ras_pc = (intptr_t)ras_lookup(l->l_proc,
381 	    (void *) (intptr_t)gr[_REG_EPC])) != -1)
382 		gr[_REG_EPC] = ras_pc;
383 
384 	*flags |= _UC_CPU | _UC_TLSBASE;
385 
386 	/* Save floating point register context, if any. */
387 	KASSERT(l == curlwp);
388 	if (fpu_used_p(l)) {
389 		size_t fplen;
390 		/*
391 		 * If this process is the current FP owner, dump its
392 		 * context to the PCB first.
393 		 */
394 		fpu_save(l);
395 
396 		/*
397 		 * The PCB FP regs struct includes the FP CSR, so use the
398 		 * size of __fpregs.__fp_r when copying.
399 		 */
400 #if !defined(__mips_o32)
401 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
402 #endif
403 			fplen = sizeof(struct fpreg);
404 #if !defined(__mips_o32)
405 		} else {
406 			fplen = sizeof(struct fpreg_oabi);
407 		}
408 #endif
409 		struct pcb * const pcb = lwp_getpcb(l);
410 		memcpy(&mcp->__fpregs, &pcb->pcb_fpregs, fplen);
411 		*flags |= _UC_FPU;
412 	}
413 }
414 
415 int
416 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
417 {
418 
419 	/* XXX:  Do we validate the addresses?? */
420 	return 0;
421 }
422 
423 int
424 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
425 {
426 	struct trapframe *tf = l->l_md.md_utf;
427 	struct proc *p = l->l_proc;
428 	const __greg_t *gr = mcp->__gregs;
429 	int error;
430 
431 	/* Restore register context, if any. */
432 	if (flags & _UC_CPU) {
433 		error = cpu_mcontext_validate(l, mcp);
434 		if (error)
435 			return error;
436 
437 		/* Save register context. */
438 
439 #ifdef __mips_n32
440 		CTASSERT(_R_AST == _REG_AT);
441 		if (__predict_false(p->p_md.md_abi == _MIPS_BSD_API_O32)) {
442 			const mcontext_o32_t *mcp32 = (const mcontext_o32_t *)mcp;
443 			const __greg32_t *gr32 = mcp32->__gregs;
444 			for (size_t i = _R_AST; i < 32; i++) {
445 				tf->tf_regs[i] = gr32[i];
446 			}
447 		} else
448 #endif
449 		memcpy(&tf->tf_regs[_R_AST], &gr[_REG_AT],
450 		       sizeof(mips_reg_t) * 31);
451 
452 		tf->tf_regs[_R_MULLO] = gr[_REG_MDLO];
453 		tf->tf_regs[_R_MULHI] = gr[_REG_MDHI];
454 		tf->tf_regs[_R_CAUSE] = gr[_REG_CAUSE];
455 		tf->tf_regs[_R_PC]    = gr[_REG_EPC];
456 		/* Do not restore SR. */
457 	}
458 
459 	/* Restore the private thread context */
460 	if (flags & _UC_TLSBASE) {
461 		lwp_setprivate(l, (void *)(intptr_t)mcp->_mc_tlsbase);
462 	}
463 
464 	/* Restore floating point register context, if any. */
465 	if (flags & _UC_FPU) {
466 		size_t fplen;
467 
468 		/* Disable the FPU contents. */
469 		fpu_discard(l);
470 
471 #if !defined(__mips_o32)
472 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
473 #endif
474 			fplen = sizeof(struct fpreg);
475 #if !defined(__mips_o32)
476 		} else {
477 			fplen = sizeof(struct fpreg_oabi);
478 		}
479 #endif
480 		/*
481 		 * The PCB FP regs struct includes the FP CSR, so use the
482 		 * proper size of fpreg when copying.
483 		 */
484 		struct pcb * const pcb = lwp_getpcb(l);
485 		memcpy(&pcb->pcb_fpregs, &mcp->__fpregs, fplen);
486 	}
487 
488 	mutex_enter(p->p_lock);
489 	if (flags & _UC_SETSTACK)
490 		l->l_sigstk.ss_flags |= SS_ONSTACK;
491 	if (flags & _UC_CLRSTACK)
492 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
493 	mutex_exit(p->p_lock);
494 
495 	return (0);
496 }
497 
498 void
499 cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
500 {
501 
502 	KASSERT(kpreempt_disabled());
503 
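	/*
	 * Kernel preemption requests are delivered as an IPI (remote CPU)
	 * or a soft interrupt (local CPU).  Ordinary reschedules just need
	 * an AST; remote CPUs are prodded with IPI_AST.
	 */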
504 	if ((flags & RESCHED_KPREEMPT) != 0) {
505 #ifdef __HAVE_PREEMPTION
506 		if ((flags & RESCHED_REMOTE) != 0) {
507 			cpu_send_ipi(ci, IPI_KPREEMPT);
508 		} else {
509 			softint_trigger(SOFTINT_KPREEMPT);
510 		}
511 #endif
512 		return;
513 	}
514 	if ((flags & RESCHED_REMOTE) != 0) {
515 #ifdef MULTIPROCESSOR
516 		cpu_send_ipi(ci, IPI_AST);
517 #endif
518 	} else {
519 		l->l_md.md_astpending = 1;		/* force call to ast() */
520 	}
521 }
522 
523 uint32_t
524 cpu_clkf_usermode_mask(void)
525 {
526 
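	/*
	 * Return the status register bit(s) that indicate the clock
	 * interrupt came from user mode: the KSU field on MIPS3-style
	 * CPUs, the previous KU bit on older ones.
	 */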
527 	return CPUISMIPS3 ? MIPS_SR_KSU_USER : MIPS_SR_KU_PREV;
528 }
529 
530 void
531 cpu_signotify(struct lwp *l)
532 {
533 
534 	KASSERT(kpreempt_disabled());
535 #ifdef __HAVE_FAST_SOFTINTS
536 	KASSERT(lwp_locked(l, NULL));
537 #endif
538 
539 	if (l->l_cpu != curcpu()) {
540 #ifdef MULTIPROCESSOR
541 		cpu_send_ipi(l->l_cpu, IPI_AST);
542 #endif
543 	} else {
544 		l->l_md.md_astpending = 1; 	/* force call to ast() */
545 	}
546 }
547 
548 void
549 cpu_need_proftick(struct lwp *l)
550 {
551 
552 	KASSERT(kpreempt_disabled());
553 	KASSERT(l->l_cpu == curcpu());
554 
555 	l->l_pflag |= LP_OWEUPC;
556 	l->l_md.md_astpending = 1;		/* force call to ast() */
557 }
558 
559 #ifdef __HAVE_PREEMPTION
560 bool
561 cpu_kpreempt_enter(uintptr_t where, int s)
562 {
563 
564 	KASSERT(kpreempt_disabled());
565 
566 #if 0
567 	if (where == (intptr_t)-2) {
568 		KASSERT(curcpu()->ci_mtx_count == 0);
569 		/*
570 		 * We must be called via kern_intr (which already checks for
571 		 * IPL_NONE so of course we call be preempted).
572 		 * IPL_NONE so of course we can be preempted).
573 		return true;
574 	}
575 	/*
576 	 * We are called from KPREEMPT_ENABLE().  If we are at IPL_NONE,
577 	 * of course we can be preempted.  If we aren't, ask for a
578 	 * softint so that kern_intr can call kpreempt.
579 	 */
580 	if (s == IPL_NONE) {
581 		KASSERT(curcpu()->ci_mtx_count == 0);
582 		return true;
583 	}
584 	softint_trigger(SOFTINT_KPREEMPT);
585 #endif
586 	return false;
587 }
588 
589 void
590 cpu_kpreempt_exit(uintptr_t where)
591 {
592 
593 	/* do nothing */
594 }
595 
596 /*
597  * Return true if preemption is disabled for MD reasons.  Must be called
598  * with preemption disabled, and thus is only for diagnostic checks.
599  */
600 bool
601 cpu_kpreempt_disabled(void)
602 {
603 
604 	/*
605 	 * Any elevated IPL disables preemption.
606 	 */
607 	return curcpu()->ci_cpl > IPL_NONE;
608 }
609 #endif /* __HAVE_PREEMPTION */
610 
611 void
612 cpu_idle(void)
613 {
614 	void (*const mach_idle)(void) = mips_locoresw.lsw_cpu_idle;
615 	struct cpu_info * const ci = curcpu();
616 
617 	while (!ci->ci_want_resched) {
618 #ifdef __HAVE_FAST_SOFTINTS
619 		KASSERT(ci->ci_data.cpu_softints == 0);
620 #endif
621 		(*mach_idle)();
622 	}
623 }
624 
625 bool
626 cpu_intr_p(void)
627 {
628 	int idepth;
629 	long pctr;
630 	lwp_t *l;
631 
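	/*
	 * Sample the interrupt depth without blocking preemption: if the
	 * LWP was context-switched between reading lwp_pctr() and reading
	 * l_cpu->ci_idepth, the counter changes and we retry.
	 */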
632 	l = curlwp;
633 	do {
634 		pctr = lwp_pctr();
635 		idepth = l->l_cpu->ci_idepth;
636 	} while (__predict_false(pctr != lwp_pctr()));
637 
638 	return idepth != 0;
639 }
640 
641 #ifdef MULTIPROCESSOR
642 
643 void
644 cpu_broadcast_ipi(int tag)
645 {
646 
647 	// No reason to remove ourselves since multicast_ipi will do that for us
648 	cpu_multicast_ipi(cpus_running, tag);
649 }
650 
651 void
652 cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
653 {
654 	struct cpu_info * const ci = curcpu();
655 	kcpuset_t *kcp2 = ci->ci_multicastcpus;
656 
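	/* If we are the only CPU running, there is nobody to send an IPI to. */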
657 	if (kcpuset_match(cpus_running, ci->ci_kcpuset))
658 		return;
659 
660 	kcpuset_copy(kcp2, kcp);
661 	kcpuset_remove(kcp2, ci->ci_kcpuset);
662 	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
663 		kcpuset_clear(kcp2, --cii);
664 		(void)cpu_send_ipi(cpu_lookup(cii), tag);
665 	}
666 }
667 
668 int
669 cpu_send_ipi(struct cpu_info *ci, int tag)
670 {
671 
672 	return (*mips_locoresw.lsw_send_ipi)(ci, tag);
673 }
674 
675 static void
676 cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
677 {
678 	bool done = false;
679 	struct cpu_info * const ci = curcpu();
680 	kcpuset_t *kcp = ci->ci_watchcpus;
681 
682 	/* Spin for a finite time waiting for the wanted CPUs to check in. */
683 
684 	for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) {
685 		kcpuset_copy(kcp, watchset);
686 		kcpuset_intersect(kcp, wanted);
687 		done = kcpuset_match(kcp, wanted);
688 	}
689 
690 	if (!done) {
691 		cpuid_t cii;
692 		kcpuset_copy(kcp, wanted);
693 		kcpuset_remove(kcp, watchset);
694 		if ((cii = kcpuset_ffs(kcp)) != 0) {
695 			printf("Failed to %s:", s);
696 			do {
697 				kcpuset_clear(kcp, --cii);
698 				printf(" cpu%lu", cii);
699 			} while ((cii = kcpuset_ffs(kcp)) != 0);
700 			printf("\n");
701 		}
702 	}
703 }
704 
705 /*
706  * Halt this cpu
707  */
708 void
709 cpu_halt(void)
710 {
711 	cpuid_t cii = cpu_index(curcpu());
712 
713 	printf("cpu%lu: shutting down\n", cii);
714 	kcpuset_atomic_set(cpus_halted, cii);
715 	spl0();		/* allow interrupts e.g. further ipi ? */
716 	for (;;) ;	/* spin */
717 
718 	/* NOTREACHED */
719 }
720 
721 /*
722  * Halt all running cpus, excluding current cpu.
723  */
724 void
725 cpu_halt_others(void)
726 {
727 	kcpuset_t *kcp;
728 
729 	// If we are the only CPU running, there's nothing to do.
730 	if (kcpuset_match(cpus_running, curcpu()->ci_kcpuset))
731 		return;
732 
733 	// Get all running CPUs
734 	kcpuset_clone(&kcp, cpus_running);
735 	// Remove ourself
736 	kcpuset_remove(kcp, curcpu()->ci_kcpuset);
737 	// Remove any halted CPUs
738 	kcpuset_remove(kcp, cpus_halted);
739 	// If there are CPUs left, send the IPIs
740 	if (!kcpuset_iszero(kcp)) {
741 		cpu_multicast_ipi(kcp, IPI_HALT);
742 		cpu_ipi_wait("halt", cpus_halted, kcp);
743 	}
744 	kcpuset_destroy(kcp);
745 
746 	/*
747 	 * TBD
748 	 * Depending on available firmware methods, other cpus will
749 	 * either shut down themselves, or spin and wait for us to
750 	 * stop them.
751 	 */
752 }
753 
754 /*
755  * Pause this cpu
756  */
757 void
758 cpu_pause(struct reg *regsp)
759 {
760 	int s = splhigh();
761 	cpuid_t cii = cpu_index(curcpu());
762 
763 	if (__predict_false(cold)) {
764 		splx(s);
765 		return;
766 	}
767 
768 	do {
769 		kcpuset_atomic_set(cpus_paused, cii);
770 		do {
771 			;
772 		} while (kcpuset_isset(cpus_paused, cii));
773 		kcpuset_atomic_set(cpus_resumed, cii);
774 #if defined(DDB)
775 		if (ddb_running_on_this_cpu_p())
776 			cpu_Debugger();
777 		if (ddb_running_on_any_cpu_p())
778 			continue;
779 #endif
780 	} while (false);
781 
782 	splx(s);
783 }
784 
785 /*
786  * Pause all running cpus, excluding current cpu.
787  */
788 void
789 cpu_pause_others(void)
790 {
791 	struct cpu_info * const ci = curcpu();
792 
793 	if (cold || kcpuset_match(cpus_running, ci->ci_kcpuset))
794 		return;
795 
796 	kcpuset_t *kcp = ci->ci_ddbcpus;
797 
798 	kcpuset_copy(kcp, cpus_running);
799 	kcpuset_remove(kcp, ci->ci_kcpuset);
800 	kcpuset_remove(kcp, cpus_paused);
801 
802 	cpu_broadcast_ipi(IPI_SUSPEND);
803 	cpu_ipi_wait("pause", cpus_paused, kcp);
804 }
805 
806 /*
807  * Resume a single cpu
808  */
809 void
810 cpu_resume(cpuid_t cii)
811 {
812 
813 	if (__predict_false(cold))
814 		return;
815 
816 	struct cpu_info * const ci = curcpu();
817 	kcpuset_t *kcp = ci->ci_ddbcpus;
818 
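	/*
	 * Note the CPU we expect to hear from, clear the resumed set and
	 * un-pause it; cpu_ipi_wait() then waits for it to announce itself
	 * in cpus_resumed.
	 */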
819 	kcpuset_set(kcp, cii);
820 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
821 	kcpuset_atomic_clear(cpus_paused, cii);
822 
823 	cpu_ipi_wait("resume", cpus_resumed, kcp);
824 }
825 
826 /*
827  * Resume all paused cpus.
828  */
829 void
830 cpu_resume_others(void)
831 {
832 
833 	if (__predict_false(cold))
834 		return;
835 
836 	struct cpu_info * const ci = curcpu();
837 	kcpuset_t *kcp = ci->ci_ddbcpus;
838 
839 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
840 	kcpuset_copy(kcp, cpus_paused);
841 	kcpuset_atomicly_remove(cpus_paused, cpus_paused);
842 
843 	/* CPUs awake on cpus_paused clear */
844 	cpu_ipi_wait("resume", cpus_resumed, kcp);
845 }
846 
847 bool
848 cpu_is_paused(cpuid_t cii)
849 {
850 
851 	return !cold && kcpuset_isset(cpus_paused, cii);
852 }
853 
854 #ifdef DDB
855 void
856 cpu_debug_dump(void)
857 {
858 	CPU_INFO_ITERATOR cii;
859 	struct cpu_info *ci;
860 	char running, hatched, paused, resumed, halted;
861 	db_printf("CPU CPUID STATE CPUINFO            CPL INT MTX IPIS(A/R)\n");
862 	for (CPU_INFO_FOREACH(cii, ci)) {
863 		hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
864 		running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
865 		paused  = (kcpuset_isset(cpus_paused,  cpu_index(ci)) ? 'P' : '-');
866 		resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
867 		halted  = (kcpuset_isset(cpus_halted,  cpu_index(ci)) ? 'h' : '-');
868 		db_printf("%3d 0x%03lx %c%c%c%c%c %p "
869 			"%3d %3d %3d "
870 			"0x%02" PRIx64 "/0x%02" PRIx64 "\n",
871 			cpu_index(ci), ci->ci_cpuid,
872 			running, hatched, paused, resumed, halted,
873 			ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
874 			ci->ci_active_ipis, ci->ci_request_ipis);
875 	}
876 }
877 #endif
878 
879 void
880 cpu_hatch(struct cpu_info *ci)
881 {
882 	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
883 
884 	/*
885 	 * Invalidate all the TLB entries (even wired ones) and then reserve
886 	 * space for the wired TLB entries.
887 	 */
888 	mips3_cp0_wired_write(0);
889 	tlb_invalidate_all();
890 	mips3_cp0_wired_write(ti->ti_wired);
891 
892 	/*
893 	 * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
894 	 */
895 	cpu_hwrena_setup();
896 
897 	/*
898 	 * If we are using register zero relative addressing to access cpu_info
899 	 * in the exception vectors, enter that mapping into TLB now.
900 	 */
901 	if (ci->ci_tlb_slot >= 0) {
902 		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
903 		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
904 		const struct tlbmask tlbmask = {
905 			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
906 #if (PGSHIFT & 1)
907 			.tlb_lo0 = tlb_lo,
908 			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
909 #else
910 			.tlb_lo0 = 0,
911 			.tlb_lo1 = tlb_lo,
912 #endif
913 			.tlb_mask = -1,
914 		};
915 
916 		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
917 		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
918 	}
919 
920 	/*
921 	 * Flush the icache just to be sure.
922 	 */
923 	mips_icache_sync_all();
924 
925 	/*
926 	 * Let this CPU do its own initialization (for things that have to be
927 	 * done on the local CPU).
928 	 */
929 	(*mips_locoresw.lsw_cpu_init)(ci);
930 
931 	// Show this CPU as present.
932 	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);
933 
934 	/*
935 	 * Announce we are hatched
936 	 */
937 	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));
938 
939 	/*
940 	 * Now wait to be set free!
941 	 */
942 	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
943 		/* spin, spin, spin */
944 	}
945 
946 	/*
947 	 * initialize the MIPS count/compare clock
948 	 */
949 	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
950 	KASSERT(ci->ci_cycles_per_hz != 0);
951 	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
952 	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
953 	ci->ci_data.cpu_cc_skew = 0;
954 
955 	/*
956 	 * Let this CPU do its own post-running initialization
957 	 * (for things that have to be done on the local CPU).
958 	 */
959 	(*mips_locoresw.lsw_cpu_run)(ci);
960 
961 	/*
962 	 * Now turn on interrupts (and verify they are on).
963 	 */
964 	spl0();
965 	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
966 	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
967 
968 	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
969 	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));
970 
971 	/*
972 	 * And do a tail call to idle_loop
973 	 */
974 	idle_loop(NULL);
975 }
976 
977 void
978 cpu_boot_secondary_processors(void)
979 {
980 	CPU_INFO_ITERATOR cii;
981 	struct cpu_info *ci;
982 
983 	if ((boothowto & RB_MD1) != 0)
984 		return;
985 
986 	for (CPU_INFO_FOREACH(cii, ci)) {
987 		if (CPU_IS_PRIMARY(ci))
988 			continue;
989 		KASSERT(ci->ci_data.cpu_idlelwp);
990 
991 		/*
992 		 * Skip this CPU if it didn't successfully hatch.
993 		 */
994 		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
995 			continue;
996 
997 		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
998 		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
999 		kcpuset_set(cpus_running, cpu_index(ci));
1000 		// Spin until the cpu calls idle_loop
1001 		for (u_int i = 0; i < 10000; i++) {
1002 			if (kcpuset_isset(kcpuset_running, cpu_index(ci)))
1003 				break;
1004 			delay(1000);
1005 		}
1006 	}
1007 }
1008 
1009 void
1010 xc_send_ipi(struct cpu_info *ci)
1011 {
1012 
1013 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_XCALL);
1014 }
1015 
1016 void
1017 cpu_ipi(struct cpu_info *ci)
1018 {
1019 
1020 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_GENERIC);
1021 }
1022 
1023 #endif /* MULTIPROCESSOR */
1024 
1025 void
1026 cpu_offline_md(void)
1027 {
1028 
1029 	(*mips_locoresw.lsw_cpu_offline_md)();
1030 }
1031 
1032 #ifdef _LP64
1033 void
1034 cpu_vmspace_exec(lwp_t *l, vaddr_t start, vaddr_t end)
1035 {
1036 	/*
1037 	 * We need to turn on/off UX so that copyout/copyin will work
1038 	 * well before setreg gets called.
1039 	 */
1040 	uint32_t sr = mips_cp0_status_read();
1041 
1042 	if (end != (uint32_t) end) {
1043 		mips_cp0_status_write(sr | MIPS3_SR_UX);
1044 	} else {
1045 		mips_cp0_status_write(sr & ~MIPS3_SR_UX);
1046 	}
1047 }
1048 #endif
1049 
1050 int
1051 cpu_lwp_setprivate(lwp_t *l, void *v)
1052 {
1053 
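	/*
	 * If this is the running LWP and the CPU has the UserLocal
	 * register, update it now so RDHWR returns the new TLS base
	 * immediately.
	 */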
1054 #if (MIPS32R2 + MIPS64R2) > 0
1055 	if (l == curlwp && MIPS_HAS_USERLOCAL) {
1056 		mipsNN_cp0_userlocal_write(v);
1057 	}
1058 #endif
1059 	return 0;
1060 }
1061 
1062 
1063 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
1064 
1065 #if (CPUWATCH_MAX != 8)
1066 # error CPUWATCH_MAX
1067 #endif
1068 
1069 /*
1070  * cpuwatch_discover - determine how many COP0 watchpoints this CPU supports
1071  */
1072 u_int
1073 cpuwatch_discover(void)
1074 {
1075 	int i;
1076 
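	/*
	 * Walk the WatchHi chain: the 'M' bit says another watch register
	 * pair follows this one.
	 */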
1077 	for (i=0; i < CPUWATCH_MAX; i++) {
1078 		uint32_t watchhi = mipsNN_cp0_watchhi_read(i);
1079 		if ((watchhi & __BIT(31)) == 0)	/* test 'M' bit */
1080 			break;
1081 	}
1082 	return i + 1;
1083 }
1084 
1085 void
1086 cpuwatch_free(cpu_watchpoint_t *cwp)
1087 {
1088 #ifdef DIAGNOSTIC
1089 	struct cpu_info * const ci = curcpu();
1090 
1091 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1092 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1093 #endif
1094 	cwp->cw_mode = 0;
1095 	cwp->cw_asid = 0;
1096 	cwp->cw_addr = 0;
1097 	cpuwatch_clr(cwp);
1098 }
1099 
1100 /*
1101  * cpuwatch_alloc
1102  * 	find an empty slot
1103  *	no locking for the table since it is CPU private
1104  */
1105 cpu_watchpoint_t *
1106 cpuwatch_alloc(void)
1107 {
1108 	struct cpu_info * const ci = curcpu();
1109 	cpu_watchpoint_t *cwp;
1110 
1111 	for (int i=0; i < ci->ci_cpuwatch_count; i++) {
1112 		cwp = &ci->ci_cpuwatch_tab[i];
1113 		if ((cwp->cw_mode & CPUWATCH_RWX) == 0)
1114 			return cwp;
1115 	}
1116 	return NULL;
1117 }
1118 
1119 
1120 void
1121 cpuwatch_set_all(void)
1122 {
1123 	struct cpu_info * const ci = curcpu();
1124 	cpu_watchpoint_t *cwp;
1125 	int i;
1126 
1127 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
1128 		cwp = &ci->ci_cpuwatch_tab[i];
1129 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
1130 			cpuwatch_set(cwp);
1131 	}
1132 }
1133 
1134 void
1135 cpuwatch_clr_all(void)
1136 {
1137 	struct cpu_info * const ci = curcpu();
1138 	cpu_watchpoint_t *cwp;
1139 	int i;
1140 
1141 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
1142 		cwp = &ci->ci_cpuwatch_tab[i];
1143 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
1144 			cpuwatch_clr(cwp);
1145 	}
1146 }
1147 
1148 /*
1149  * cpuwatch_set - establish a MIPS COP0 watchpoint
1150  */
1151 void
1152 cpuwatch_set(cpu_watchpoint_t *cwp)
1153 {
1154 	struct cpu_info * const ci = curcpu();
1155 	uint32_t watchhi;
1156 	register_t watchlo;
1157 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
1158 
1159 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1160 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1161 
1162 	watchlo = cwp->cw_addr;
1163 	if (cwp->cw_mode & CPUWATCH_WRITE)
1164 		watchlo |= __BIT(0);
1165 	if (cwp->cw_mode & CPUWATCH_READ)
1166 		watchlo |= __BIT(1);
1167 	if (cwp->cw_mode & CPUWATCH_EXEC)
1168 		watchlo |= __BIT(2);
1169 
1170 	if (cwp->cw_mode & CPUWATCH_ASID)
1171 		watchhi = cwp->cw_asid << 16;	/* addr qualified by asid */
1172 	else
1173 		watchhi = __BIT(30);		/* addr not qual. by asid (Global) */
1174 	if (cwp->cw_mode & CPUWATCH_MASK)
1175 		watchhi |= cwp->cw_mask;	/* set "dont care" addr match bits */
1176 
1177 	mipsNN_cp0_watchhi_write(cwnum, watchhi);
1178 	mipsNN_cp0_watchlo_write(cwnum, watchlo);
1179 }
1180 
1181 /*
1182  * cpuwatch_clr - disestablish a MIPS COP0 watchpoint
1183  */
1184 void
1185 cpuwatch_clr(cpu_watchpoint_t *cwp)
1186 {
1187 	struct cpu_info * const ci = curcpu();
1188 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
1189 
1190 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1191 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1192 
1193 	mipsNN_cp0_watchhi_write(cwnum, 0);
1194 	mipsNN_cp0_watchlo_write(cwnum, 0);
1195 }
1196 
1197 #endif	/* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
1198