/*	$NetBSD: rmixl_cpu.c,v 1.15 2022/09/29 07:00:47 skrll Exp $	*/

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "locators.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.15 2022/09/29 07:00:47 skrll Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lock.h>
#include <sys/lwp.h>
#include <sys/cpu.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_extern.h>
#include <mips/regnum.h>
#include <mips/pmap.h>
#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>
#include <mips/rmi/rmixl_cpucorevar.h>
#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>
#include <mips/rmi/rmixl_fmnvar.h>
#ifdef DDB
#include <mips/db_machdep.h>
#endif

#include <mips/asm.h>			/* XXX CALLFRAME_SIZ */

static int	cpu_rmixl_match(device_t, cfdata_t, void *);
static void	cpu_rmixl_attach(device_t, device_t, void *);
static void	cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const);
#ifdef NOTYET
static int	cpu_fmn_intr(void *, rmixl_fmn_rxmsg_t *);
#endif

#ifdef MULTIPROCESSOR
void		cpu_rmixl_hatch(struct cpu_info *);
void		cpu_rmixl_run(struct cpu_info *);
static int	cpu_setup_trampoline_common(struct cpu_info *, struct rmixl_cpu_trampoline_args *);
static void	cpu_setup_trampoline_callback(struct cpu_info *);
#endif	/* MULTIPROCESSOR */

#ifdef DEBUG
void		rmixl_cpu_data_print(struct cpu_data *);
struct cpu_info *
		rmixl_cpuinfo_print(u_int);
#endif	/* DEBUG */

CFATTACH_DECL_NEW(cpu_rmixl, sizeof(struct rmixl_cpu_softc),
	cpu_rmixl_match, cpu_rmixl_attach, NULL, NULL);

#ifdef MULTIPROCESSOR
static struct rmixl_cpu_trampoline_args rmixl_cpu_trampoline_args;
#endif

/*
 * cpu_rmixl_watchpoint_init - initialize COP0 watchpoint stuff
 *
 * clear IEU_DEFEATURE[DBE] to ensure T_WATCH on watchpoint exception
 * set COP0 watchhi and watchlo
 *
 * disable all watchpoints
 */
static void
cpu_rmixl_watchpoint_init(void)
{
	uint32_t r;

	r = rmixl_mfcr(RMIXL_PCR_IEU_DEFEATURE);
	r &= ~__BIT(7);		/* DBE */
	rmixl_mtcr(RMIXL_PCR_IEU_DEFEATURE, r);

	cpuwatch_clr_all();
}

/*
 * cpu_xls616_erratum
 *
 * on the XLS616, COUNT/COMPARE clock regs seem to interact between
 * threads on a core
 *
 * the symptom of the error is retarded clock interrupts
 * and very slow apparent system performance
 *
 * other XLS chips may have the same problem.
 * we may need to add other PID checks.
 */
static inline bool
cpu_xls616_erratum(device_t parent, struct cpucore_attach_args *ca)
{
#if 0
	if (mips_options.mips_cpu->cpu_pid == MIPS_XLS616) {
		if (ca->ca_thread > 0) {
			aprint_error_dev(parent, "XLS616 CLOCK ERRATUM: "
				"deconfigure cpu%d\n", ca->ca_thread);
			return true;
		}
	}
#endif
	return false;
}

static bool
cpu_rmixl_erratum(device_t parent, struct cpucore_attach_args *ca)
{
	return cpu_xls616_erratum(parent, ca);
}

static int
cpu_rmixl_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpucore_attach_args *ca = aux;
	int thread = cf->cf_loc[CPUCORECF_THREAD];

	if (!cpu_rmixl(mips_options.mips_cpu))
		return 0;

	if (strncmp(ca->ca_name, cf->cf_name, strlen(cf->cf_name)) == 0
#ifndef MULTIPROCESSOR
	    && ca->ca_thread == 0
#endif
	    && (thread == CPUCORECF_THREAD_DEFAULT || thread == ca->ca_thread)
	    && (!cpu_rmixl_erratum(parent, ca)))
		return 1;

	return 0;
}

static void
cpu_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct rmixl_cpu_softc * const sc = device_private(self);
	struct cpu_info *ci = NULL;
	static bool once = false;
	extern void rmixl_spl_init_cpu(void);

	if (once == false) {
		/* first attach is the primary cpu */
		once = true;
		ci = curcpu();
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		rmixl_spl_init_cpu();	/* spl initialization for CPU#0 */
		cpu_rmixl_attach_primary(sc);

#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_cpu_init = cpu_rmixl_hatch;
		mips_locoresw.lsw_cpu_run = cpu_rmixl_run;
	} else {
		struct cpucore_attach_args * const ca = aux;
		struct cpucore_softc * const ccsc = device_private(parent);
		rmixlfw_psb_type_t psb_type = rmixl_configuration.rc_psb_type;
		cpuid_t cpuid;

		KASSERT(ca->ca_core < 8);
		KASSERT(ca->ca_thread < 4);
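		/* each core carries 4 hw threads; encode the logical cpuid as (core << 2) | thread */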
		cpuid = (ca->ca_core << 2) | ca->ca_thread;
		ci = cpu_info_alloc(ccsc->sc_tlbinfo, cpuid,
		    /* XXX */ 0, ca->ca_core, ca->ca_thread);
		KASSERT(ci != NULL);
		if (ccsc->sc_tlbinfo == NULL)
			ccsc->sc_tlbinfo = ci->ci_tlb_info;
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		switch (psb_type) {
		case PSB_TYPE_RMI:
		case PSB_TYPE_DELL:
			cpu_setup_trampoline_callback(ci);
			break;
		default:
			aprint_error(": psb type=%s cpu_wakeup unsupported\n",
			    rmixlfw_psb_type_name(psb_type));
			return;
		}

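		/* wait up to ~1 second (10000 * 100us) for the new cpu to mark itself hatched */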
		for (size_t i=0; i < 10000; i++) {
			if (kcpuset_isset(cpus_hatched, cpu_index(ci)))
				break;
			DELAY(100);
		}
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci))) {
			aprint_error(": failed to hatch\n");
			return;
		}
#endif	/* MULTIPROCESSOR */
	}

	/*
	 * do per-cpu interrupt initialization
	 */
	rmixl_intr_init_cpu(ci);

	aprint_normal("\n");

	cpu_attach_common(self, ci);
}

/*
 * attach the primary processor
 */
static void
cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const sc)
{
	struct cpu_info *ci = sc->sc_ci;
	uint32_t ebase;

	KASSERT(CPU_IS_PRIMARY(ci));

	/*
	 * obtain and set cpuid of the primary processor
	 */
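	/* EBASE is CP0 register 15, select 1; the CPUNum field occupies bits 9:0 */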
	asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
	ci->ci_cpuid = ebase & __BITS(9,0);

	cpu_rmixl_watchpoint_init();

	rmixl_fmn_init();

	rmixl_intr_init_clk();
#ifdef MULTIPROCESSOR
	rmixl_intr_init_ipi();
#endif

#ifdef NOTYET
	void *ih = rmixl_fmn_intr_establish(RMIXL_FMN_STID_CORE0,
	    cpu_fmn_intr, ci);
	if (ih == NULL)
		panic("%s: rmixl_fmn_intr_establish failed",
		    __func__);
	sc->sc_ih_fmn = ih;
#endif
}

#ifdef NOTYET
static int
cpu_fmn_intr(void *arg, rmixl_fmn_rxmsg_t *rxmsg)
{
	if (CPU_IS_PRIMARY(curcpu())) {
		printf("%s: cpu%ld: rxsid=%#x, code=%d, size=%d\n",
		    __func__, cpu_number(),
		    rxmsg->rxsid, rxmsg->code, rxmsg->size);
		for (int i=0; i < rxmsg->size; i++)
			printf("\t%#"PRIx64"\n", rxmsg->msg.data[i]);
	}

	return 1;
}
#endif

#ifdef MULTIPROCESSOR
/*
 * cpu_rmixl_run
 *
 * - chip-specific post-running code called from cpu_hatch via lsw_cpu_run
 */
void
cpu_rmixl_run(struct cpu_info *ci)
{
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	cpucore_rmixl_run(device_parent(sc->sc_dev));
}

/*
 * cpu_rmixl_hatch
 *
 * - chip-specific hatch code called from cpu_hatch via lsw_cpu_init
 */
void
cpu_rmixl_hatch(struct cpu_info *ci)
{
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	extern void rmixl_spl_init_cpu(void);

	rmixl_spl_init_cpu();	/* spl initialization for this CPU */

	(void)splhigh();

#ifdef DIAGNOSTIC
	uint32_t ebase = mipsNN_cp0_ebase_read();
	KASSERT((ebase & MIPS_EBASE_CPUNUM) == ci->ci_cpuid);
	KASSERT(curcpu() == ci);
#endif

	cpucore_rmixl_hatch(device_parent(sc->sc_dev));

	cpu_rmixl_watchpoint_init();
}

static int
cpu_setup_trampoline_common(struct cpu_info *ci, struct rmixl_cpu_trampoline_args *ta)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	uintptr_t stacktop;

#ifdef DIAGNOSTIC
	/* Ensure our current stack can be used by the firmware */
	uint64_t sp;
	__asm__ volatile("move %0, $sp\n" : "=r"(sp));
#ifdef _LP64
	/* can be made into a KSEG0 addr */
	KASSERT(MIPS_XKPHYS_P(sp));
	KASSERT((MIPS_XKPHYS_TO_PHYS(sp) >> 32) == 0);
#else
	/* is a KSEG0 addr */
	KASSERT(MIPS_KSEG0_P(sp));
#endif /* _LP64 */
#endif /* DIAGNOSTIC */

#ifndef _LP64
	/*
	 * Ensure 'ci' is a KSEG0 address for trampoline args
	 * to avoid TLB fault in cpu_trampoline() when loading ci_idlelwp
	 */
	KASSERT(MIPS_KSEG0_P(ci));
#endif

	/*
	 * Ensure 'ta' is a KSEG0 address for trampoline args
	 * to avoid TLB fault in trampoline when loading args.
	 *
	 * Note:
	 *   RMI firmware only passes the lower 32-bit half of 'ta'
	 *   to rmixl_cpu_trampoline (the upper half is clear), so
	 *   rmixl_cpu_trampoline must reconstruct the missing upper half;
	 *   rmixl_cpu_trampoline "knows" 'ta' is a KSEG0 address
	 *   and sign-extends to make an LP64 KSEG0 address.
	 */
	KASSERT(MIPS_KSEG0_P(ta));

	/*
	 * marshal args for rmixl_cpu_trampoline;
	 * note for non-LP64 kernel, use of intptr_t
	 * forces sign extension of 32 bit pointers
	 */
	stacktop = (uintptr_t)l->l_md.md_utf - CALLFRAME_SIZ;
	ta->ta_sp = (uint64_t)(intptr_t)stacktop;
	ta->ta_lwp = (uint64_t)(intptr_t)l;
	ta->ta_cpuinfo = (uint64_t)(intptr_t)ci;

	return 0;
}

static void
cpu_setup_trampoline_callback(struct cpu_info *ci)
{
	void (*wakeup_cpu)(void *, void *, unsigned int);
	struct rmixl_cpu_trampoline_args *ta = &rmixl_cpu_trampoline_args;
	extern void rmixl_cpu_trampoline(void *);
	extern void rmixlfw_wakeup_cpu(void *, void *, u_int64_t, void *);

	cpu_setup_trampoline_common(ci, ta);

#ifdef _LP64
	wakeup_cpu = (void *)rmixl_configuration.rc_psb_info.wakeup;
#else
	wakeup_cpu = (void *)(intptr_t)
	    (rmixl_configuration.rc_psb_info.wakeup & 0xffffffff);
#endif

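	/* wake only this cpu: the third argument is a one-hot cpu mask built from ci_cpuid */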
	rmixlfw_wakeup_cpu(rmixl_cpu_trampoline, (void *)ta,
	    (uint64_t)1 << ci->ci_cpuid, wakeup_cpu);
}
#endif	/* MULTIPROCESSOR */


#ifdef DEBUG
void
rmixl_cpu_data_print(struct cpu_data *dp)
{
	printf("cpu_biglock_wanted %p\n", dp->cpu_biglock_wanted);
	printf("cpu_callout %p\n", dp->cpu_callout);
	printf("&cpu_schedstate %p\n", &dp->cpu_schedstate);	/* TBD */
	printf("&cpu_xcall %p\n", &dp->cpu_xcall);		/* TBD */
	printf("cpu_xcall_pending %d\n", dp->cpu_xcall_pending);
	printf("cpu_idlelwp %p\n", dp->cpu_idlelwp);
	printf("cpu_lockstat %p\n", dp->cpu_lockstat);
	printf("cpu_index %d\n", dp->cpu_index);
	printf("cpu_biglock_count %d\n", dp->cpu_biglock_count);
	printf("cpu_psz_read_depth %d\n", dp->cpu_psz_read_depth);
	printf("cpu_lkdebug_recurse %d\n", dp->cpu_lkdebug_recurse);
	printf("cpu_softints %d\n", dp->cpu_softints);
	printf("cpu_nsyscall %"PRIu64"\n", dp->cpu_nsyscall);
	printf("cpu_ntrap %"PRIu64"\n", dp->cpu_ntrap);
	printf("cpu_nfault %"PRIu64"\n", dp->cpu_nfault);
	printf("cpu_nintr %"PRIu64"\n", dp->cpu_nintr);
	printf("cpu_nsoft %"PRIu64"\n", dp->cpu_nsoft);
	printf("cpu_nswtch %"PRIu64"\n", dp->cpu_nswtch);
	printf("cpu_uvm %p\n", dp->cpu_uvm);
	printf("cpu_softcpu %p\n", dp->cpu_softcpu);
	printf("&cpu_biodone %p\n", &dp->cpu_biodone);		/* TBD */
	printf("&cpu_percpu %p\n", &dp->cpu_percpu);		/* TBD */
	printf("cpu_selcluster %p\n", dp->cpu_selcluster);
	printf("cpu_nch %p\n", dp->cpu_nch);
	printf("&cpu_ld_locks %p\n", &dp->cpu_ld_locks);	/* TBD */
	printf("&cpu_ld_lock %p\n", &dp->cpu_ld_lock);		/* TBD */
	printf("cpu_cc_freq %#"PRIx64"\n", dp->cpu_cc_freq);
	printf("cpu_cc_skew %#"PRIx64"\n", dp->cpu_cc_skew);
}

struct cpu_info *
rmixl_cpuinfo_print(u_int cpuindex)
{
	struct cpu_info * const ci = cpu_lookup(cpuindex);

	if (ci != NULL) {
		rmixl_cpu_data_print(&ci->ci_data);
		printf("ci_dev %p\n", ci->ci_dev);
		printf("ci_cpuid %ld\n", ci->ci_cpuid);
		printf("ci_cctr_freq %ld\n", ci->ci_cctr_freq);
		printf("ci_cpu_freq %ld\n", ci->ci_cpu_freq);
		printf("ci_cycles_per_hz %ld\n", ci->ci_cycles_per_hz);
		printf("ci_divisor_delay %ld\n", ci->ci_divisor_delay);
		printf("ci_divisor_recip %ld\n", ci->ci_divisor_recip);
		printf("ci_curlwp %p\n", ci->ci_curlwp);
		printf("ci_onproc %p\n", ci->ci_onproc);
		printf("ci_want_resched %d\n", ci->ci_want_resched);
		printf("ci_mtx_count %d\n", ci->ci_mtx_count);
		printf("ci_mtx_oldspl %d\n", ci->ci_mtx_oldspl);
		printf("ci_idepth %d\n", ci->ci_idepth);
		printf("ci_cpl %d\n", ci->ci_cpl);
		printf("&ci_cpl %p\n", &ci->ci_cpl);	/* XXX */
		printf("ci_next_cp0_clk_intr %#x\n", ci->ci_next_cp0_clk_intr);
		for (int i=0; i < SOFTINT_COUNT; i++)
			printf("ci_softlwps[%d] %p\n", i, ci->ci_softlwps[i]);
		printf("ci_tlb_slot %d\n", ci->ci_tlb_slot);
		printf("ci_pmap_asid_cur %d\n", ci->ci_pmap_asid_cur);
		printf("ci_tlb_info %p\n", ci->ci_tlb_info);
		printf("ci_pmap_kern_segtab %p\n", ci->ci_pmap_kern_segtab);
		printf("ci_pmap_user_segtab %p\n", ci->ci_pmap_user_segtab);
#ifdef _LP64
		printf("ci_pmap_kern_seg0tab %p\n", ci->ci_pmap_kern_seg0tab);
		printf("ci_pmap_user_seg0tab %p\n", ci->ci_pmap_user_seg0tab);
#else
		printf("ci_pmap_srcbase %#"PRIxVADDR"\n", ci->ci_pmap_srcbase);
		printf("ci_pmap_dstbase %#"PRIxVADDR"\n", ci->ci_pmap_dstbase);
#endif
#ifdef MULTIPROCESSOR
		printf("ci_flags %#lx\n", ci->ci_flags);
		printf("ci_request_ipis %#"PRIx64"\n", ci->ci_request_ipis);
		printf("ci_active_ipis %#"PRIx64"\n", ci->ci_active_ipis);
		printf("ci_ksp_tlb_slot %d\n", ci->ci_ksp_tlb_slot);
#endif
	}

	return ci;
}
#endif	/* DEBUG */