/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/disp.h>
#include <sys/promif.h>
#include <sys/clock.h>
#include <sys/cpuvar.h>
#include <sys/stack.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <sys/reboot.h>
#include <sys/avintr.h>
#include <sys/vtrace.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/copyops.h>
#include <sys/pg.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/sunddi.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/bootconf.h>
#include <sys/boot_console.h>
#include <sys/kdi_machimpl.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/pci_cfgspace.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#else
#include <sys/xpv_support.h>
#endif

/*
 * some globals for patching the result of cpuid
 * to solve problems w/ creative cpu vendors
 */

extern uint32_t cpuid_feature_ecx_include;
extern uint32_t cpuid_feature_ecx_exclude;
extern uint32_t cpuid_feature_edx_include;
extern uint32_t cpuid_feature_edx_exclude;

/*
 * Dummy spl priority masks
 */
static unsigned char dummy_cpu_pri[MAXIPL + 1] = {
        0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf,
        0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf
};

/*
 * Set console mode
 */
static void
set_console_mode(uint8_t val)
{
        struct bop_regs rp = {0};

        rp.eax.byte.ah = 0x0;
        rp.eax.byte.al = val;
        rp.ebx.word.bx = 0x0;

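        /*
         * BIOS int 10h, function 00h (AH = 0): set the video mode given
         * in AL, executed on our behalf through the boot loader.
         */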
        BOP_DOINT(bootops, 0x10, &rp);
}


/*
 * Setup routine called right before main(). Interposing this function
 * before main() allows us to call it in a machine-independent fashion.
 */
void
mlsetup(struct regs *rp)
{
        u_longlong_t prop_value;
        extern struct classfuncs sys_classfuncs;
        extern disp_t cpu0_disp;
        extern char t0stack[];
        extern int post_fastreboot;
        extern int console;
        extern uint64_t plat_dr_options;

        ASSERT_STACK_ALIGNED();

        /*
         * initialize cpu_self
         */
        cpu[0]->cpu_self = cpu[0];

#if defined(__xpv)
        /*
         * Point at the hypervisor's virtual cpu structure
         */
        cpu[0]->cpu_m.mcpu_vcpu_info = &HYPERVISOR_shared_info->vcpu_info[0];
#endif

        /*
         * Set up dummy cpu_pri_data values till psm spl code is
         * installed. This allows splx() to work on amd64.
         */

        cpu[0]->cpu_pri_data = dummy_cpu_pri;

        /*
         * check if we've got special bits to clear or set
         * when checking cpu features
         */

        if (bootprop_getval("cpuid_feature_ecx_include", &prop_value) != 0)
                cpuid_feature_ecx_include = 0;
        else
                cpuid_feature_ecx_include = (uint32_t)prop_value;

        if (bootprop_getval("cpuid_feature_ecx_exclude", &prop_value) != 0)
                cpuid_feature_ecx_exclude = 0;
        else
                cpuid_feature_ecx_exclude = (uint32_t)prop_value;

        if (bootprop_getval("cpuid_feature_edx_include", &prop_value) != 0)
                cpuid_feature_edx_include = 0;
        else
                cpuid_feature_edx_include = (uint32_t)prop_value;

        if (bootprop_getval("cpuid_feature_edx_exclude", &prop_value) != 0)
                cpuid_feature_edx_exclude = 0;
        else
                cpuid_feature_edx_exclude = (uint32_t)prop_value;

        /*
         * Initialize idt0, gdt0, ldt0_default, ktss0 and dftss.
         */
        init_desctbls();

        /*
         * lgrp_init() and possibly cpuid_pass1() need PCI config
         * space access
         */
#if defined(__xpv)
        if (DOMAIN_IS_INITDOMAIN(xen_info))
                pci_cfgspace_init();
#else
        pci_cfgspace_init();
#endif

        /*
         * The first lightweight pass (pass0) through the cpuid data
         * was done in locore before mlsetup was called. Do the next
         * pass in C code.
         *
         * The x86_featureset is initialized here based on the capabilities
         * of the boot CPU. Note that if we choose to support CPUs that have
         * different feature sets (at which point we would almost certainly
         * want to set the feature bits to correspond to the feature
         * minimum) this value may be altered.
         */
        cpuid_pass1(cpu[0], x86_featureset);

#if !defined(__xpv)

        if (get_hwenv() == HW_XEN_HVM)
                xen_hvm_init();

        /*
         * Patch the tsc_read routine with the appropriate set of
         * instructions, depending on the processor family and architecture,
         * to read the time-stamp counter while ensuring no out-of-order
         * execution. Patch it while the kernel text is still writable.
         *
         * Note: tsc_read is not patched for Intel processors whose family
         * is >6 and for AMD processors whose family is >0xf (in case they
         * don't support the rdtscp instruction, which is unlikely). By
         * default tsc_read will use cpuid for serialization in such cases.
         * The following code needs to be revisited if Intel processors of
         * family >= 0xf retain the instruction-serializing nature of the
         * mfence instruction.
         * Note: tsc_read is not patched for x86 processors which do
         * not support "mfence". By default tsc_read will use cpuid for
         * serialization in such cases.
         *
         * The Xen hypervisor does not correctly report whether rdtscp is
         * supported or not, so we must assume that it is not.
         */
        if (get_hwenv() != HW_XEN_HVM &&
            is_x86_feature(x86_featureset, X86FSET_TSCP))
                patch_tsc_read(X86_HAVE_TSCP);
        else if (cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
            cpuid_getfamily(CPU) <= 0xf &&
            is_x86_feature(x86_featureset, X86FSET_SSE2))
                patch_tsc_read(X86_TSC_MFENCE);
        else if (cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
            cpuid_getfamily(CPU) <= 6 &&
            is_x86_feature(x86_featureset, X86FSET_SSE2))
                patch_tsc_read(X86_TSC_LFENCE);

#endif /* !__xpv */

#if defined(__i386) && !defined(__xpv)
        /*
         * Some i386 processors do not implement the rdtsc instruction,
         * or at least they do not implement it correctly. Patch them to
         * return 0.
         */
        if (!is_x86_feature(x86_featureset, X86FSET_TSC))
                patch_tsc_read(X86_NO_TSC);
#endif /* __i386 && !__xpv */

#if defined(__amd64) && !defined(__xpv)
        patch_memops(cpuid_getvendor(CPU));
#endif /* __amd64 && !__xpv */

#if !defined(__xpv)
        /* XXPV what, if anything, should be dorked with here under xen? */

        /*
         * While we're thinking about the TSC, let's set up %cr4 so that
         * userland can issue rdtsc, and initialize the TSC_AUX value
         * (the cpuid) for the rdtscp instruction on appropriately
         * capable hardware.
         */
        if (is_x86_feature(x86_featureset, X86FSET_TSC))
                setcr4(getcr4() & ~CR4_TSD);

        if (is_x86_feature(x86_featureset, X86FSET_TSCP))
                (void) wrmsr(MSR_AMD_TSCAUX, 0);

        if (is_x86_feature(x86_featureset, X86FSET_DE))
                setcr4(getcr4() | CR4_DE);
#endif /* !__xpv */

        /*
         * initialize t0
         */
        t0.t_stk = (caddr_t)rp - MINFRAME;
        t0.t_stkbase = t0stack;
        t0.t_pri = maxclsyspri - 3;
        t0.t_schedflag = TS_LOAD | TS_DONT_SWAP;
        t0.t_procp = &p0;
        t0.t_plockp = &p0lock.pl_lock;
        t0.t_lwp = &lwp0;
        t0.t_forw = &t0;
        t0.t_back = &t0;
        t0.t_next = &t0;
        t0.t_prev = &t0;
        t0.t_cpu = cpu[0];
        t0.t_disp_queue = &cpu0_disp;
        t0.t_bind_cpu = PBIND_NONE;
        t0.t_bind_pset = PS_NONE;
        t0.t_bindflag = (uchar_t)default_binding_mode;
        t0.t_cpupart = &cp_default;
        t0.t_clfuncs = &sys_classfuncs.thread;
        t0.t_copyops = NULL;
        THREAD_ONPROC(&t0, CPU);

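        /*
         * Minimal lwp0 and p0 setup: just enough state for t0 to look
         * like an ordinary kernel thread owned by the system process.
         */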
        lwp0.lwp_thread = &t0;
        lwp0.lwp_regs = (void *)rp;
        lwp0.lwp_procp = &p0;
        t0.t_tid = p0.p_lwpcnt = p0.p_lwprcnt = p0.p_lwpid = 1;

        p0.p_exec = NULL;
        p0.p_stat = SRUN;
        p0.p_flag = SSYS;
        p0.p_tlist = &t0;
        p0.p_stksize = 2*PAGESIZE;
        p0.p_stkpageszc = 0;
        p0.p_as = &kas;
        p0.p_lockp = &p0lock;
        p0.p_brkpageszc = 0;
        p0.p_t1_lgrpid = LGRP_NONE;
        p0.p_tr_lgrpid = LGRP_NONE;
        sigorset(&p0.p_ignore, &ignoredefault);

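        /*
         * Wire t0 into the boot CPU as its current, dispatched and idle
         * thread, and mark the CPU ready to run.
         */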
        CPU->cpu_thread = &t0;
        bzero(&cpu0_disp, sizeof (disp_t));
        CPU->cpu_disp = &cpu0_disp;
        CPU->cpu_disp->disp_cpu = CPU;
        CPU->cpu_dispthread = &t0;
        CPU->cpu_idle_thread = &t0;
        CPU->cpu_flags = CPU_READY | CPU_RUNNING | CPU_EXISTS | CPU_ENABLE;
        CPU->cpu_dispatch_pri = t0.t_pri;

        CPU->cpu_id = 0;

        CPU->cpu_pri = 12;      /* initial PIL for the boot CPU */

        /*
         * The kernel doesn't use LDTs unless a process explicitly requests one.
         */
        p0.p_ldt_desc = null_sdesc;

        /*
         * Initialize thread/cpu microstate accounting
         */
        init_mstate(&t0, LMS_SYSTEM);
        init_cpu_mstate(CPU, CMS_SYSTEM);

        /*
         * Initialize lists of available and active CPUs.
         */
        cpu_list_init(CPU);

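        /* Bootstrap processor group (PG) support for the boot CPU. */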
        pg_cpu_bootstrap(CPU);

        /*
         * Now that we have taken over the GDT, IDT and have initialized
         * the active CPU list it's time to inform kmdb if present.
         */
        if (boothowto & RB_DEBUG)
                kdi_idt_sync();

        /*
         * Explicitly set the console to text mode (0x3) if this is a boot
         * following a Fast Reboot and the console is set to CONS_SCREEN_TEXT.
         */
        if (post_fastreboot && console == CONS_SCREEN_TEXT)
                set_console_mode(0x3);

        /*
         * If requested (boot -d) drop into kmdb.
         *
         * This must be done after cpu_list_init() on the 64-bit kernel
         * since taking a trap requires that we re-compute gsbase based
         * on the cpu list.
         */
        if (boothowto & RB_DEBUGENTER)
                kmdb_enter();

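        /* Set up the per-CPU VM data for the boot CPU. */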
        cpu_vm_data_init(CPU);

        rp->r_fp = 0;   /* terminate kernel stack traces! */

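        /* Initialize the kernel's promif (PROM interface) layer. */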
        prom_init("kernel", (void *)NULL);

        /* A user-set option overrides the firmware value. */
        if (bootprop_getval(PLAT_DR_OPTIONS_NAME, &prop_value) == 0) {
                plat_dr_options = (uint64_t)prop_value;
        }
#if defined(__xpv)
        /* No support for DR operations on xpv */
        plat_dr_options = 0;
#else /* __xpv */
        /* PLAT_DR_FEATURE_ENABLED should only be set by the DR driver. */
        plat_dr_options &= ~PLAT_DR_FEATURE_ENABLED;
#ifndef __amd64
        /* Only enable CPU/memory DR on the 64-bit kernel. */
        plat_dr_options &= ~PLAT_DR_FEATURE_MEMORY;
        plat_dr_options &= ~PLAT_DR_FEATURE_CPU;
#endif /* __amd64 */
#endif /* __xpv */

        /*
         * Get the value of the "plat_dr_physmax" boot option.
         * It overrides values calculated from the MSCT or SRAT tables.
         */
        if (bootprop_getval(PLAT_DR_PHYSMAX_NAME, &prop_value) == 0) {
                plat_dr_physmax = ((uint64_t)prop_value) >> PAGESHIFT;
        }

        /* Get the value of boot_ncpus. */
        if (bootprop_getval(BOOT_NCPUS_NAME, &prop_value) != 0) {
                boot_ncpus = NCPU;
        } else {
                boot_ncpus = (int)prop_value;
                if (boot_ncpus <= 0 || boot_ncpus > NCPU)
                        boot_ncpus = NCPU;
        }

        /*
         * Set max_ncpus and boot_max_ncpus to boot_ncpus if the platform
         * doesn't support CPU DR operations.
         */
        if (plat_dr_support_cpu() == 0) {
                max_ncpus = boot_max_ncpus = boot_ncpus;
        } else {
                if (bootprop_getval(PLAT_MAX_NCPUS_NAME, &prop_value) != 0) {
                        max_ncpus = NCPU;
                } else {
                        max_ncpus = (int)prop_value;
                        if (max_ncpus <= 0 || max_ncpus > NCPU) {
                                max_ncpus = NCPU;
                        }
                        if (boot_ncpus > max_ncpus) {
                                boot_ncpus = max_ncpus;
                        }
                }

                if (bootprop_getval(BOOT_MAX_NCPUS_NAME, &prop_value) != 0) {
                        boot_max_ncpus = boot_ncpus;
                } else {
                        boot_max_ncpus = (int)prop_value;
                        if (boot_max_ncpus <= 0 || boot_max_ncpus > NCPU) {
                                boot_max_ncpus = boot_ncpus;
                        } else if (boot_max_ncpus > max_ncpus) {
                                boot_max_ncpus = max_ncpus;
                        }
                }
        }

        /*
         * Initialize the lgrp framework
         */
        lgrp_init(LGRP_INIT_STAGE1);

        if (boothowto & RB_HALT) {
                prom_printf("unix: kernel halted by -h flag\n");
                prom_enter_mon();
        }

        ASSERT_STACK_ALIGNED();

        /*
         * Fill out cpu_ucode_info. Update microcode if necessary.
         */
        ucode_check(CPU);

        if (workaround_errata(CPU) != 0)
                panic("critical workaround(s) missing for boot cpu");
}


void
mach_modpath(char *path, const char *filename)
{
        /*
         * Construct the directory path from the filename.
         */

        int len;
        char *p;
        const char isastr[] = "/amd64";
        size_t isalen = strlen(isastr);

        if ((p = strrchr(filename, '/')) == NULL)
                return;

        while (p > filename && *(p - 1) == '/')
                p--;    /* remove trailing '/' characters */
        if (p == filename)
                p++;    /* so "/" -is- the modpath in this case */

        /*
         * Remove optional isa-dependent directory name - the module
         * subsystem will put this back again (!)
         */
        len = p - filename;
        if (len > isalen &&
            strncmp(&filename[len - isalen], isastr, isalen) == 0)
                p -= isalen;

        /*
         * "/platform/mumblefrotz" + " " + MOD_DEFPATH
         */
        len += (p - filename) + 1 + strlen(MOD_DEFPATH) + 1;
        (void) strncpy(path, filename, p - filename);
}