/*	$NetBSD: rmixl_cpu.c,v 1.6 2013/11/25 03:01:58 christos Exp $	*/

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "locators.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.6 2013/11/25 03:01:58 christos Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lock.h>
#include <sys/lwp.h>
#include <sys/malloc.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_extern.h>
#include <mips/regnum.h>
#include <mips/asm.h>
#include <mips/pmap.h>
#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>
#include <mips/rmi/rmixl_cpucorevar.h>
#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>
#include <mips/rmi/rmixl_fmnvar.h>
#ifdef DDB
#include <mips/db_machdep.h>
#endif


static int	cpu_rmixl_match(device_t, cfdata_t, void *);
static void	cpu_rmixl_attach(device_t, device_t, void *);
static void	cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const);
#ifdef NOTYET
static int	cpu_fmn_intr(void *, rmixl_fmn_rxmsg_t *);
#endif

#ifdef MULTIPROCESSOR
void		cpu_rmixl_hatch(struct cpu_info *);
void		cpu_rmixl_run(struct cpu_info *);
static int	cpu_setup_trampoline_common(struct cpu_info *, struct rmixl_cpu_trampoline_args *);
static void	cpu_setup_trampoline_callback(struct cpu_info *);
#endif	/* MULTIPROCESSOR */

#ifdef DEBUG
void		rmixl_cpu_data_print(struct cpu_data *);
struct cpu_info *
		rmixl_cpuinfo_print(u_int);
#endif	/* DEBUG */

CFATTACH_DECL_NEW(cpu_rmixl, sizeof(struct rmixl_cpu_softc),
	cpu_rmixl_match, cpu_rmixl_attach, NULL, NULL);

#ifdef MULTIPROCESSOR
static struct rmixl_cpu_trampoline_args rmixl_cpu_trampoline_args;
#endif

/*
 * cpu_rmixl_watchpoint_init - initialize COP0 watchpoint support
 *
 * clear IEU_DEFEATURE[DBE] to ensure a T_WATCH trap is raised on a
 * watchpoint exception, then disable all watchpoints by clearing the
 * COP0 watchhi/watchlo registers
 */
static void
cpu_rmixl_watchpoint_init(void)
{
	uint32_t r;

	r = rmixl_mfcr(RMIXL_PCR_IEU_DEFEATURE);
	r &= ~__BIT(7);		/* DBE */
	rmixl_mtcr(RMIXL_PCR_IEU_DEFEATURE, r);

	cpuwatch_clr_all();
}

/*
 * cpu_xls616_erratum
 *
 * on the XLS616, the COUNT/COMPARE clock registers appear to interact
 * between threads on a core
 *
 * the symptom of the erratum is delayed clock interrupts
 * and very slow apparent system performance
 *
 * other XLS chips may have the same problem;
 * we may need to add other PID checks.
 */
static inline bool
cpu_xls616_erratum(device_t parent, struct cpucore_attach_args *ca)
{
#if 0
	if (mips_options.mips_cpu->cpu_pid == MIPS_XLS616) {
		if (ca->ca_thread > 0) {
			aprint_error_dev(parent, "XLS616 CLOCK ERRATUM: "
				"deconfigure cpu%d\n", ca->ca_thread);
			return true;
		}
	}
#endif
	return false;
}

static bool
cpu_rmixl_erratum(device_t parent, struct cpucore_attach_args *ca)
{
	return cpu_xls616_erratum(parent, ca);
}

static int
cpu_rmixl_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpucore_attach_args *ca = aux;
	int thread = cf->cf_loc[CPUCORECF_THREAD];

	if (!cpu_rmixl(mips_options.mips_cpu))
		return 0;

	if (strncmp(ca->ca_name, cf->cf_name, strlen(cf->cf_name)) == 0
#ifndef MULTIPROCESSOR
	    && ca->ca_thread == 0
#endif
	    && (thread == CPUCORECF_THREAD_DEFAULT || thread == ca->ca_thread)
	    && (!cpu_rmixl_erratum(parent, ca)))
			return 1;

	return 0;
}

static void
cpu_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct rmixl_cpu_softc * const sc = device_private(self);
	struct cpu_info *ci = NULL;
	static bool once = false;
	extern void rmixl_spl_init_cpu(void);

	if (once == false) {
		/* first attach is the primary cpu */
		once = true;
		ci = curcpu();
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		rmixl_spl_init_cpu();	/* spl initialization for CPU#0 */
		cpu_rmixl_attach_primary(sc);

#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_cpu_init = cpu_rmixl_hatch;
		mips_locoresw.lsw_cpu_run = cpu_rmixl_run;
	} else {
		struct cpucore_attach_args * const ca = aux;
		struct cpucore_softc * const ccsc = device_private(parent);
		rmixlfw_psb_type_t psb_type = rmixl_configuration.rc_psb_type;
		cpuid_t cpuid;

		KASSERT(ca->ca_core < 8);
		KASSERT(ca->ca_thread < 4);
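		/* cpuid: thread number in the low two bits, core number above */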
		cpuid = (ca->ca_core << 2) | ca->ca_thread;
		ci = cpu_info_alloc(ccsc->sc_tlbinfo, cpuid,
		    /* XXX */ 0, ca->ca_core, ca->ca_thread);
		KASSERT(ci != NULL);
		if (ccsc->sc_tlbinfo == NULL)
			ccsc->sc_tlbinfo = ci->ci_tlb_info;
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		switch (psb_type) {
		case PSB_TYPE_RMI:
		case PSB_TYPE_DELL:
			cpu_setup_trampoline_callback(ci);
			break;
		default:
			aprint_error(": psb type=%s cpu_wakeup unsupported\n",
				rmixlfw_psb_type_name(psb_type));
			return;
		}

		const u_long cpu_mask = 1L << cpu_index(ci);
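		/* wait up to ~1 second (10000 * 100us) for the new cpu to hatch */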
		for (size_t i = 0; i < 10000; i++) {
			if ((cpus_hatched & cpu_mask) != 0)
				break;
			DELAY(100);
		}
		if ((cpus_hatched & cpu_mask) == 0) {
			aprint_error(": failed to hatch\n");
			return;
		}
#endif	/* MULTIPROCESSOR */
	}

	/*
	 * do per-cpu interrupt initialization
	 */
	rmixl_intr_init_cpu(ci);

	aprint_normal("\n");

	cpu_attach_common(self, ci);
}

/*
 * attach the primary processor
 */
static void
cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const sc)
{
	struct cpu_info *ci = sc->sc_ci;
	uint32_t ebase;

	KASSERT(CPU_IS_PRIMARY(ci));

	/*
	 * obtain and set cpuid of the primary processor
	 */
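	/* EBASE is CP0 register 15, select 1; bits 9:0 hold this cpu's number */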
	asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
	ci->ci_cpuid = ebase & __BITS(9,0);

	cpu_rmixl_watchpoint_init();

	rmixl_fmn_init();

	rmixl_intr_init_clk();
#ifdef MULTIPROCESSOR
	rmixl_intr_init_ipi();
#endif

#ifdef NOTYET
	void *ih = rmixl_fmn_intr_establish(RMIXL_FMN_STID_CORE0,
		cpu_fmn_intr, ci);
	if (ih == NULL)
		panic("%s: rmixl_fmn_intr_establish failed",
			__func__);
	sc->sc_ih_fmn = ih;
#endif
}

#ifdef NOTYET
static int
cpu_fmn_intr(void *arg, rmixl_fmn_rxmsg_t *rxmsg)
{
	if (CPU_IS_PRIMARY(curcpu())) {
		printf("%s: cpu%ld: rxsid=%#x, code=%d, size=%d\n",
			__func__, cpu_number(),
			rxmsg->rxsid, rxmsg->code, rxmsg->size);
		for (int i = 0; i < rxmsg->size; i++)
			printf("\t%#"PRIx64"\n", rxmsg->msg.data[i]);
	}

	return 1;
}
#endif

#ifdef MULTIPROCESSOR
/*
 * cpu_rmixl_run
 *
 * - chip-specific post-running code called from cpu_hatch via lsw_cpu_run
 */
void
cpu_rmixl_run(struct cpu_info *ci)
{
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	cpucore_rmixl_run(device_parent(sc->sc_dev));
}

/*
 * cpu_rmixl_hatch
 *
 * - chip-specific hatch code called from cpu_hatch via lsw_cpu_init
 */
void
cpu_rmixl_hatch(struct cpu_info *ci)
{
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	extern void rmixl_spl_init_cpu(void);

	rmixl_spl_init_cpu();	/* spl initialization for this CPU */

	(void)splhigh();

#ifdef DEBUG
	uint32_t ebase;
	asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
	KASSERT((ebase & __BITS(9,0)) == ci->ci_cpuid);
	KASSERT(curcpu() == ci);
#endif

	cpucore_rmixl_hatch(device_parent(sc->sc_dev));

	cpu_rmixl_watchpoint_init();
}

static int
cpu_setup_trampoline_common(struct cpu_info *ci, struct rmixl_cpu_trampoline_args *ta)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	uintptr_t stacktop;

#ifdef DIAGNOSTIC
	/* Ensure our current stack can be used by the firmware */
	uint64_t sp;
	__asm__ volatile("move	%0, $sp\n" : "=r"(sp));
#ifdef _LP64
	/* can be made into a KSEG0 addr */
	KASSERT(MIPS_XKPHYS_P(sp));
	KASSERT((MIPS_XKPHYS_TO_PHYS(sp) >> 32) == 0);
#else
	/* is a KSEG0 addr */
	KASSERT(MIPS_KSEG0_P(sp));
#endif	/* _LP64 */
#endif	/* DIAGNOSTIC */

#ifndef _LP64
	/*
	 * Ensure 'ci' is a KSEG0 address for trampoline args
	 * to avoid TLB fault in cpu_trampoline() when loading ci_idlelwp
	 */
	KASSERT(MIPS_KSEG0_P(ci));
#endif

	/*
	 * Ensure 'ta' is a KSEG0 address for trampoline args
	 * to avoid a TLB fault in the trampoline when loading the args.
	 *
	 * Note:
	 *   RMI firmware passes only the lower 32-bit half of 'ta'
	 *   to rmixl_cpu_trampoline (the upper half is clear),
	 *   so rmixl_cpu_trampoline must reconstruct the missing upper half;
	 *   it "knows" 'ta' is a KSEG0 address and sign-extends it
	 *   to form an LP64 KSEG0 address.
	 */
	KASSERT(MIPS_KSEG0_P(ta));

	/*
	 * marshal args for rmixl_cpu_trampoline;
	 * note that for a non-LP64 kernel, the use of intptr_t
	 * forces sign extension of 32-bit pointers
	 */
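	/* initial sp sits a call frame below the idle lwp's trapframe */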
	stacktop = (uintptr_t)l->l_md.md_utf - CALLFRAME_SIZ;
	ta->ta_sp = (uint64_t)(intptr_t)stacktop;
	ta->ta_lwp = (uint64_t)(intptr_t)l;
	ta->ta_cpuinfo = (uint64_t)(intptr_t)ci;

	return 0;
}

static void
cpu_setup_trampoline_callback(struct cpu_info *ci)
{
	void (*wakeup_cpu)(void *, void *, unsigned int);
	struct rmixl_cpu_trampoline_args *ta = &rmixl_cpu_trampoline_args;
	extern void rmixl_cpu_trampoline(void *);
	extern void rmixlfw_wakeup_cpu(void *, void *, uint64_t, void *);

	cpu_setup_trampoline_common(ci, ta);

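	/*
	 * the firmware PSB info gives the wakeup entry point as a 64-bit
	 * value; a 32-bit kernel can use only the low word of it
	 */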
#ifdef _LP64
	wakeup_cpu = (void *)rmixl_configuration.rc_psb_info.wakeup;
#else
	wakeup_cpu = (void *)(intptr_t)
		(rmixl_configuration.rc_psb_info.wakeup & 0xffffffff);
#endif

	rmixlfw_wakeup_cpu(rmixl_cpu_trampoline, (void *)ta,
		(uint64_t)1 << ci->ci_cpuid, wakeup_cpu);
}
#endif	/* MULTIPROCESSOR */


#ifdef DEBUG
void
rmixl_cpu_data_print(struct cpu_data *dp)
{
	printf("cpu_biglock_wanted %p\n", dp->cpu_biglock_wanted);
	printf("cpu_callout %p\n", dp->cpu_callout);
	printf("cpu_unused1 %p\n", dp->cpu_unused1);
	printf("cpu_unused2 %d\n", dp->cpu_unused2);
	printf("&cpu_schedstate %p\n", &dp->cpu_schedstate);	/* TBD */
	printf("&cpu_xcall %p\n", &dp->cpu_xcall);		/* TBD */
	printf("cpu_xcall_pending %d\n", dp->cpu_xcall_pending);
	printf("cpu_onproc %p\n", dp->cpu_onproc);
	printf("cpu_idlelwp %p\n", dp->cpu_idlelwp);
	printf("cpu_lockstat %p\n", dp->cpu_lockstat);
	printf("cpu_index %d\n", dp->cpu_index);
	printf("cpu_biglock_count %d\n", dp->cpu_biglock_count);
	printf("cpu_spin_locks %d\n", dp->cpu_spin_locks);
	printf("cpu_simple_locks %d\n", dp->cpu_simple_locks);
	printf("cpu_spin_locks2 %d\n", dp->cpu_spin_locks2);
	printf("cpu_lkdebug_recurse %d\n", dp->cpu_lkdebug_recurse);
	printf("cpu_softints %d\n", dp->cpu_softints);
	printf("cpu_nsyscall %"PRIu64"\n", dp->cpu_nsyscall);
	printf("cpu_ntrap %"PRIu64"\n", dp->cpu_ntrap);
	printf("cpu_nfault %"PRIu64"\n", dp->cpu_nfault);
	printf("cpu_nintr %"PRIu64"\n", dp->cpu_nintr);
	printf("cpu_nsoft %"PRIu64"\n", dp->cpu_nsoft);
	printf("cpu_nswtch %"PRIu64"\n", dp->cpu_nswtch);
	printf("cpu_uvm %p\n", dp->cpu_uvm);
	printf("cpu_softcpu %p\n", dp->cpu_softcpu);
	printf("&cpu_biodone %p\n", &dp->cpu_biodone);		/* TBD */
	printf("&cpu_percpu %p\n", &dp->cpu_percpu);		/* TBD */
	printf("cpu_selcluster %p\n", dp->cpu_selcluster);
	printf("cpu_nch %p\n", dp->cpu_nch);
	printf("&cpu_ld_locks %p\n", &dp->cpu_ld_locks);	/* TBD */
	printf("&cpu_ld_lock %p\n", &dp->cpu_ld_lock);		/* TBD */
	printf("cpu_cc_freq %#"PRIx64"\n", dp->cpu_cc_freq);
	printf("cpu_cc_skew %#"PRIx64"\n", dp->cpu_cc_skew);
}

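/*
 * rmixl_cpuinfo_print - dump the cpu_info for the given cpu index
 * (e.g. by hand from ddb); returns the cpu_info, or NULL if none
 */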
struct cpu_info *
rmixl_cpuinfo_print(u_int cpuindex)
{
	struct cpu_info * const ci = cpu_lookup(cpuindex);

	if (ci != NULL) {
		rmixl_cpu_data_print(&ci->ci_data);
		printf("ci_dev %p\n", ci->ci_dev);
		printf("ci_cpuid %ld\n", ci->ci_cpuid);
		printf("ci_cctr_freq %ld\n", ci->ci_cctr_freq);
		printf("ci_cpu_freq %ld\n", ci->ci_cpu_freq);
		printf("ci_cycles_per_hz %ld\n", ci->ci_cycles_per_hz);
		printf("ci_divisor_delay %ld\n", ci->ci_divisor_delay);
		printf("ci_divisor_recip %ld\n", ci->ci_divisor_recip);
		printf("ci_curlwp %p\n", ci->ci_curlwp);
		printf("ci_want_resched %d\n", ci->ci_want_resched);
		printf("ci_mtx_count %d\n", ci->ci_mtx_count);
		printf("ci_mtx_oldspl %d\n", ci->ci_mtx_oldspl);
		printf("ci_idepth %d\n", ci->ci_idepth);
		printf("ci_cpl %d\n", ci->ci_cpl);
		printf("&ci_cpl %p\n", &ci->ci_cpl);	/* XXX */
		printf("ci_next_cp0_clk_intr %#x\n", ci->ci_next_cp0_clk_intr);
		for (int i = 0; i < SOFTINT_COUNT; i++)
			printf("ci_softlwps[%d] %p\n", i, ci->ci_softlwps[i]);
		printf("ci_tlb_slot %d\n", ci->ci_tlb_slot);
		printf("ci_pmap_asid_cur %d\n", ci->ci_pmap_asid_cur);
		printf("ci_tlb_info %p\n", ci->ci_tlb_info);
		printf("ci_pmap_seg0tab %p\n", ci->ci_pmap_seg0tab);
#ifdef _LP64
		printf("ci_pmap_segtab %p\n", ci->ci_pmap_segtab);
#else
		printf("ci_pmap_srcbase %#"PRIxVADDR"\n", ci->ci_pmap_srcbase);
		printf("ci_pmap_dstbase %#"PRIxVADDR"\n", ci->ci_pmap_dstbase);
#endif
#ifdef MULTIPROCESSOR
		printf("ci_flags %#lx\n", ci->ci_flags);
		printf("ci_request_ipis %#"PRIx64"\n", ci->ci_request_ipis);
		printf("ci_active_ipis %#"PRIx64"\n", ci->ci_active_ipis);
		printf("ci_ksp_tlb_slot %d\n", ci->ci_ksp_tlb_slot);
#endif
	}

	return ci;
}
#endif	/* DEBUG */