/*	$NetBSD: rmixl_cpucore.c,v 1.5 2011/04/29 22:00:03 matt Exp $	*/

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "locators.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpucore.c,v 1.5 2011/04/29 22:00:03 matt Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <mips/rmi/rmixlvar.h>
#include <mips/rmi/rmixl_cpunodevar.h>
#include <mips/rmi/rmixl_cpucorevar.h>
#include <mips/rmi/rmixl_fmnvar.h>

static int	cpucore_rmixl_match(device_t, cfdata_t, void *);
static void	cpucore_rmixl_attach(device_t, device_t, void *);
static int	cpucore_rmixl_print(void *, const char *);

CFATTACH_DECL_NEW(cpucore_rmixl, sizeof(struct cpucore_softc),
	cpucore_rmixl_match, cpucore_rmixl_attach, NULL, NULL);

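/*
 * cpucore_rmixl_match:
 *	autoconf match function; succeed only on RMI (rmixl) CPUs and
 *	only when the "core" locator is a wildcard or matches the core
 *	offered by the cpunode parent (non-MULTIPROCESSOR kernels match
 *	core 0 only).
 */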
static int
cpucore_rmixl_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpunode_attach_args *na = aux;
	int core = cf->cf_loc[CPUNODECF_CORE];

	if (!cpu_rmixl(mips_options.mips_cpu))
		return 0;

	if (strncmp(na->na_name, cf->cf_name, strlen(cf->cf_name)) == 0
#ifndef MULTIPROCESSOR
	    && na->na_core == 0
#endif
	    && (core == CPUNODECF_CORE_DEFAULT || core == na->na_core))
		return 1;

	return 0;
}

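/*
 * cpucore_rmixl_attach:
 *	autoconf attach function; record the core number, report the
 *	core clock and thread count, then attach a "cpu" child for each
 *	hardware thread the firmware enabled in userapp_cpu_map.  For
 *	the primary core, run-time initialization is also done here.
 */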
static void
cpucore_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct cpucore_softc * const sc = device_private(self);
	struct cpunode_attach_args *na = aux;
	struct cpucore_attach_args ca;
	u_int nthreads;
	struct rmixl_config *rcp = &rmixl_configuration;

	sc->sc_dev = self;
	sc->sc_core = na->na_core;
	KASSERT(sc->sc_hatched == false);

#if 0
#ifdef MULTIPROCESSOR
	/*
	 * Create the TLB structure needed: one per core; core 0 uses
	 * the system default.
	 */
	if (sc->sc_core == 0) {
		sc->sc_tlbinfo = &pmap_tlb0_info;
	} else {
		const vaddr_t va = (vaddr_t)&sc->sc_tlbinfo0;
		paddr_t pa;

		if (! pmap_extract(pmap_kernel(), va, &pa))
			panic("%s: pmap_extract fail, va %#"PRIxVADDR, __func__, va);
#ifdef _LP64
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
			MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
			MIPS_PHYS_TO_KSEG0(pa);
#endif
		pmap_tlb_info_init(sc->sc_tlbinfo);
	}
#endif
#endif

	aprint_normal("\n");
	aprint_normal_dev(self, "%lu.%02luMHz (hz cycles = %lu, "
	    "delay divisor = %lu)\n",
	    curcpu()->ci_cpu_freq / 1000000,
	    (curcpu()->ci_cpu_freq % 1000000) / 10000,
	    curcpu()->ci_cycles_per_hz, curcpu()->ci_divisor_delay);

	aprint_normal("%s: ", device_xname(self));
	cpu_identify(self);

	nthreads = MIPS_CIDFL_RMI_NTHREADS(mips_options.mips_cpu->cpu_cidflags);
	aprint_normal_dev(self, "%d %s on core\n", nthreads,
		nthreads == 1 ? "thread" : "threads");

	/*
	 * Attach CPU (RMI thread contexts) devices
	 * according to userapp_cpu_map bitmask.
	 */
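	/*
	 * For example (illustrative values), with nthreads = 4 and
	 * sc_core = 2: thread_mask is 0xf, core_shft is 8, and bits
	 * 8..11 of userapp_cpu_map select which of this core's threads
	 * the firmware enabled.
	 */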
	u_int thread_mask = (1 << nthreads) - 1;
	u_int core_shft = sc->sc_core * nthreads;
	u_int threads_enb =
		(u_int)(rcp->rc_psb_info.userapp_cpu_map >> core_shft) & thread_mask;
	u_int threads_dis = (~threads_enb) & thread_mask;

	sc->sc_threads_dis = threads_dis;
	if (threads_dis != 0) {
		aprint_normal_dev(self, "threads");
		u_int d = threads_dis;
		while (d != 0) {
			const u_int t = ffs(d) - 1;
			d ^= (1 << t);
			aprint_normal(" %d%s", t, (d==0) ? "" : ",");
		}
		aprint_normal(" offline (disabled by firmware)\n");
	}

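	/*
	 * Try to attach a cpu device for each enabled thread; any thread
	 * that does not attach is folded back into the disabled set.
	 */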
	u_int threads_try_attach = threads_enb;
	while (threads_try_attach != 0) {
		const u_int t = ffs(threads_try_attach) - 1;
		const u_int bit = 1 << t;
		threads_try_attach ^= bit;
		ca.ca_name = "cpu";
		ca.ca_thread = t;
		ca.ca_core = sc->sc_core;
		if (config_found(self, &ca, cpucore_rmixl_print) == NULL) {
			/*
			 * The thread did not attach (e.g. not configured);
			 * arrange to have it disabled in the THREADEN PCR.
			 */
			threads_enb ^= bit;
			threads_dis |= bit;
		}
	}

	sc->sc_threads_enb = threads_enb;
	sc->sc_threads_dis = threads_dis;

	/*
	 * When attaching the core of the primary CPU,
	 * do the run-time initialization here.
	 */
	if (sc->sc_core == RMIXL_CPU_CORE((curcpu()->ci_cpuid)))
		cpucore_rmixl_run(self);
}

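/*
 * cpucore_rmixl_print:
 *	autoconf print function for "cpu" children; reports the thread
 *	number and returns UNCONF.
 */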
static int
cpucore_rmixl_print(void *aux, const char *pnp)
{
	struct cpucore_attach_args *ca = aux;

	if (pnp != NULL)
		aprint_normal("%s:", pnp);
	aprint_normal(" thread %d", ca->ca_thread);

	return (UNCONF);
}

/*
 * cpucore_rmixl_run
 *	Called from cpucore_rmixl_attach for the primary core,
 *	and from cpu_rmixl_run for each hatched cpu.
 *	The first call for each cpucore initializes per-core features:
 *	- disable unused threads
 *	- set fine-grained (round-robin) thread scheduling mode
 */
void
cpucore_rmixl_run(device_t self)
{
	struct cpucore_softc * const sc = device_private(self);

	if (sc->sc_running == false) {
		sc->sc_running = true;
		rmixl_mtcr(RMIXL_PCR_THREADEN, sc->sc_threads_enb);
		rmixl_mtcr(RMIXL_PCR_SCHEDULING, 0);
	}
}

#ifdef MULTIPROCESSOR
/*
 * cpucore_rmixl_hatch
 *	Called from cpu_rmixl_hatch for each cpu.
 *	The first call for each cpucore initializes per-core features.
 */
void
cpucore_rmixl_hatch(device_t self)
{
	struct cpucore_softc * const sc = device_private(self);

	if (sc->sc_hatched == false) {
		/* PCRs for core#0 are set up in mach_init() */
		if (sc->sc_core != 0)
			rmixl_pcr_init_core();
		rmixl_fmn_init_core();
		sc->sc_hatched = true;
	}
}
#endif	/* MULTIPROCESSOR */