/* system dependent functions for use inside the whole kernel. */

#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <machine/cmos.h>
#include <machine/bios.h>
#include <machine/cpu.h>
#include <minix/portio.h>
#include <minix/cpufeature.h>
#include <assert.h>
#include <signal.h>
#include <machine/vm.h>

#include <minix/u64.h>

#include "archconst.h"
#include "oxpcie.h"

#include "glo.h"

#ifdef USE_APIC
#include "apic.h"
#endif

#ifdef USE_ACPI
#include "acpi.h"
#endif

static int osfxsr_feature; /* FXSAVE/FXRSTOR instructions support (SSEx) */

/* set MP and NE flags to handle FPU exceptions in native mode. */
#define CR0_MP_NE	0x0022
/* set CR4.OSFXSR[bit 9] if FXSR is supported. */
#define CR4_OSFXSR	(1L<<9)
/* set OSXMMEXCPT[bit 10] if we provide #XM handler. */
#define CR4_OSXMMEXCPT	(1L<<10)

void * k_stacks;

static void ser_debug(int c);
static void ser_dump_vfs(void);

#ifdef CONFIG_SMP
static void ser_dump_proc_cpu(void);
#endif
#if !CONFIG_OXPCIE
static void ser_init(void);
#endif
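/* Probe for an FPU by checking the status and control words after fninit();
 * if one is present, enable native FPU exception handling (CR0) and, when
 * the CPU supports it, FXSAVE/FXRSTOR and SSE exceptions (CR4).
 */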
void fpu_init(void)
{
	unsigned short cw, sw;

	fninit();
	sw = fnstsw();
	fnstcw(&cw);

	if((sw & 0xff) == 0 &&
	   (cw & 0x103f) == 0x3f) {
		/* We have some sort of FPU, but don't check exact model.
		 * Set CR0_NE and CR0_MP to handle fpu exceptions
		 * in native mode. */
		write_cr0(read_cr0() | CR0_MP_NE);
		get_cpulocal_var(fpu_presence) = 1;
		if(_cpufeature(_CPUF_I386_FXSR)) {
			u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */

			/* OSXMMEXCPT if supported
			 * FXSR feature can be available without SSE
			 */
			if(_cpufeature(_CPUF_I386_SSE))
				cr4 |= CR4_OSXMMEXCPT;

			write_cr4(cr4);
			osfxsr_feature = 1;
		} else {
			osfxsr_feature = 0;
		}
	} else {
		/* No FPU present. */
		get_cpulocal_var(fpu_presence) = 0;
		osfxsr_feature = 0;
		return;
	}
}

void save_local_fpu(struct proc *pr, int retain)
{
	char *state = pr->p_seg.fpu_state;

	/* Save process FPU context. If the 'retain' flag is set, keep the FPU
	 * state as is. If the flag is not set, the state is undefined upon
	 * return, and the caller is responsible for reloading a proper state.
	 */

	if(!is_fpu())
		return;

	assert(state);

	if(osfxsr_feature) {
		fxsave(state);
	} else {
		fnsave(state);
		if (retain)
			(void) frstor(state);
	}
}

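/* Save the FPU context of a process, wherever it happens to be. On SMP the
 * process may be running on another CPU; in that case that CPU is asked to
 * stop the process and save its context, including the FPU state.
 */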
void save_fpu(struct proc *pr)
{
#ifdef CONFIG_SMP
	if (cpuid != pr->p_cpu) {
		int stopped;

		/* remember if the process was already stopped */
		stopped = RTS_ISSET(pr, RTS_PROC_STOP);

		/* stop the remote process and force its context to be saved */
		smp_schedule_stop_proc_save_ctx(pr);

		/*
		 * If the process wasn't stopped, let it run again. The
		 * process is kept blocked by the fact that the kernel cannot
		 * run on its cpu.
		 */
		if (!stopped)
			RTS_UNSET(pr, RTS_PROC_STOP);

		return;
	}
#endif

	if (get_cpulocal_var(fpu_owner) == pr) {
		disable_fpu_exception();
		save_local_fpu(pr, TRUE /*retain*/);
	}
}

/* reserve a chunk of memory for fpu state; each one has to
 * be FPUALIGN-aligned.
 */
static char fpu_state[NR_PROCS][FPU_XFP_SIZE] __aligned(FPUALIGN);

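/* Reset the architecture-specific state of a process: assign and clear its
 * FPU save area (for non-kernel processes), wipe the register frame, load
 * the default PSW and segment selectors, and mark the context for a full
 * restore.
 */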
void arch_proc_reset(struct proc *pr)
{
	char *v = NULL;
	struct stackframe_s reg;

	assert(pr->p_nr < NR_PROCS);

	if(pr->p_nr >= 0) {
		v = fpu_state[pr->p_nr];
		/* verify alignment */
		assert(!((vir_bytes)v % FPUALIGN));
		/* initialize state */
		memset(v, 0, FPU_XFP_SIZE);
	}

	/* Clear process state. */
	memset(&reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr))
		reg.psw = INIT_TASK_PSW;
	else
		reg.psw = INIT_PSW;

	pr->p_seg.fpu_state = v;

	/* Initialize the fundamentals that are (initially) the same for all
	 * processes - the segment selectors it gets to use.
	 */
	pr->p_reg.cs = USER_CS_SELECTOR;
	pr->p_reg.gs =
	pr->p_reg.fs =
	pr->p_reg.ss =
	pr->p_reg.es =
	pr->p_reg.ds = USER_DS_SELECTOR;

	/* set full context and make sure it gets restored */
	arch_proc_setcontext(pr, &reg, 0, KTS_FULLCONTEXT);
}

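/* Store a secondary IPC return value; on x86 it is passed back in EBX. */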
void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
{
	p->p_reg.bx = val;
}

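/* Load the FPU context of a process. A process that has never used the FPU
 * gets a freshly initialized FPU instead; a failed restore of a saved state
 * is reported as EINVAL.
 */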
int restore_fpu(struct proc *pr)
{
	int failed;
	char *state = pr->p_seg.fpu_state;

	assert(state);

	if(!proc_used_fpu(pr)) {
		fninit();
		pr->p_misc_flags |= MF_FPU_INITIALIZED;
	} else {
		if(osfxsr_feature) {
			failed = fxrstor(state);
		} else {
			failed = frstor(state);
		}

		if (failed) return EINVAL;
	}

	return OK;
}

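/* Identify the current CPU using CPUID: leaf 0 yields the vendor string,
 * leaf 1 the family/model/stepping (with the extended family and model
 * fields folded in) and the feature flag words.
 */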
void cpu_identify(void)
{
	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;

	eax = 0;
	_cpuid(&eax, &ebx, &ecx, &edx);

	if (ebx == INTEL_CPUID_GEN_EBX && ecx == INTEL_CPUID_GEN_ECX &&
			edx == INTEL_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_INTEL;
	} else if (ebx == AMD_CPUID_GEN_EBX && ecx == AMD_CPUID_GEN_ECX &&
			edx == AMD_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_AMD;
	} else
		cpu_info[cpu].vendor = CPU_VENDOR_UNKNOWN;

	if (eax == 0)
		return;

	eax = 1;
	_cpuid(&eax, &ebx, &ecx, &edx);

	cpu_info[cpu].family = (eax >> 8) & 0xf;
	if (cpu_info[cpu].family == 0xf)
		cpu_info[cpu].family += (eax >> 20) & 0xff;
	cpu_info[cpu].model = (eax >> 4) & 0xf;
	if (cpu_info[cpu].model == 0xf || cpu_info[cpu].model == 0x6)
		cpu_info[cpu].model += ((eax >> 16) & 0xf) << 4;
	cpu_info[cpu].stepping = eax & 0xf;
	cpu_info[cpu].flags[0] = ecx;
	cpu_info[cpu].flags[1] = edx;
}

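/* Architecture-specific initialization at boot time: set up the kernel
 * stacks (and, without SMP, the TSS), the debugging serial port, ACPI and
 * the local APIC when configured, and reserve the BIOS memory ranges.
 */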
void arch_init(void)
{
	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * use stack 0 and cpu id 0 on a single processor machine, SMP
	 * configuration does this in smp_init() for all cpus at once
	 */
	tss_init(0, get_k_stack_top(0));
#endif

#if !CONFIG_OXPCIE
	ser_init();
#endif

#ifdef USE_ACPI
	acpi_init();
#endif

#if defined(USE_APIC) && !defined(CONFIG_SMP)
	if (config_no_apic) {
		DEBUGBASIC(("APIC disabled, using legacy PIC\n"));
	}
	else if (!apic_single_cpu_init()) {
		DEBUGBASIC(("APIC not present, using legacy PIC\n"));
	}
#endif

	/* Reserve some BIOS ranges */
	cut_memmap(&kinfo, BIOS_MEM_BEGIN, BIOS_MEM_END);
	cut_memmap(&kinfo, BASE_MEM_TOP, UPPER_MEM_END);
}

/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug(void)
{
	u8_t c, lsr;

#if CONFIG_OXPCIE
	{
		int oxin;
		if((oxin = oxpcie_in()) >= 0)
			ser_debug(oxin);
	}
#endif

	lsr = inb(COM1_LSR);
	if (!(lsr & LSR_DR))
		return;
	c = inb(COM1_RBR);
	ser_debug(c);
}

static void ser_dump_queue_cpu(unsigned cpu)
{
	int q;
	struct proc ** rdy_head;

	rdy_head = get_cpu_var(cpu, run_q_head);

	for(q = 0; q < NR_SCHED_QUEUES; q++) {
		struct proc *p;
		if(rdy_head[q]) {
			printf("%2d: ", q);
			for(p = rdy_head[q]; p; p = p->p_nextready) {
				printf("%s / %d ", p->p_name, p->p_endpoint);
			}
			printf("\n");
		}
	}
}

static void ser_dump_queues(void)
{
#ifdef CONFIG_SMP
	unsigned cpu;

	printf("--- run queues ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d :\n", cpu);
		ser_dump_queue_cpu(cpu);
	}
#else
	ser_dump_queue_cpu(0);
#endif
}

#ifdef CONFIG_SMP
static void dump_bkl_usage(void)
{
	unsigned cpu;

	printf("--- BKL usage ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n", cpu,
				ex64hi(kernel_ticks[cpu]),
				ex64lo(kernel_ticks[cpu]),
				ex64hi(bkl_ticks[cpu]),
				ex64lo(bkl_ticks[cpu]),
				bkl_succ[cpu], bkl_tries[cpu]);
	}
}

static void reset_bkl_usage(void)
{
	memset(kernel_ticks, 0, sizeof(kernel_ticks));
	memset(bkl_ticks, 0, sizeof(bkl_ticks));
	memset(bkl_tries, 0, sizeof(bkl_tries));
	memset(bkl_succ, 0, sizeof(bkl_succ));
}
#endif

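/* Handle a character received on the debugging serial line. Known keys:
 * 'Q' shuts the system down, '1' dumps processes, '2' dumps the run queues,
 * '5' asks VFS for stack traces, and, when compiled in, 'B'/'b' dump/reset
 * BKL statistics, '4' dumps processes per CPU, '8'/'9' toggle scheduling
 * trace flags, and 'I' dumps the APIC IRQ state.
 */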
static void ser_debug(const int c)
{
	serial_debug_active = 1;

	switch(c)
	{
	case 'Q':
		minix_shutdown(0);
		NOT_REACHABLE;
#ifdef CONFIG_SMP
	case 'B':
		dump_bkl_usage();
		break;
	case 'b':
		reset_bkl_usage();
		break;
#endif
	case '1':
		ser_dump_proc();
		break;
	case '2':
		ser_dump_queues();
		break;
#ifdef CONFIG_SMP
	case '4':
		ser_dump_proc_cpu();
		break;
#endif
	case '5':
		ser_dump_vfs();
		break;
#if DEBUG_TRACE
#define TOGGLECASE(ch, flag)				\
	case ch: {					\
		if(verboseflags & flag) {		\
			verboseflags &= ~flag;		\
			printf("%s disabled\n", #flag);	\
		} else {				\
			verboseflags |= flag;		\
			printf("%s enabled\n", #flag);	\
		}					\
		break;					\
	}
	TOGGLECASE('8', VF_SCHEDULING)
	TOGGLECASE('9', VF_PICKPROC)
#endif
#ifdef USE_APIC
	case 'I':
		dump_apic_irq_state();
		break;
#endif
	}
	serial_debug_active = 0;
}

#if DEBUG_SERIAL

static void ser_dump_vfs(void)
{
	/* Notify VFS it has to generate stack traces. Kernel can't do that as
	 * it's not aware of user space threads.
	 */
	mini_notify(proc_addr(KERNEL), VFS_PROC_NR);
}

#ifdef CONFIG_SMP
static void ser_dump_proc_cpu(void)
{
	struct proc *pp;
	unsigned cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d processes : \n", cpu);
		for (pp = BEG_USER_ADDR; pp < END_PROC_ADDR; pp++) {
			if (isemptyp(pp) || pp->p_cpu != cpu)
				continue;
			print_proc(pp);
		}
	}
}
#endif

#endif /* DEBUG_SERIAL */

#if SPROFILE

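/* Start the profiling clock: program the CMOS RTC to generate periodic
 * interrupts at the requested frequency and return the IRQ line to hook.
 */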
int arch_init_profile_clock(const u32_t freq)
{
	int r;
	/* Set CMOS timer frequency. */
	outb(RTC_INDEX, RTC_REG_A);
	outb(RTC_IO, RTC_A_DV_OK | freq);
	/* Enable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r | RTC_B_PIE);
	/* Mandatory read of CMOS register to enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);

	return CMOS_CLOCK_IRQ;
}

void arch_stop_profile_clock(void)
{
	int r;
	/* Disable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r & ~RTC_B_PIE);
}

void arch_ack_profile_clock(void)
{
	/* Mandatory read of CMOS register to re-enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);
}

#endif

void arch_do_syscall(struct proc *proc)
{
	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	assert(proc->p_misc_flags & MF_SC_DEFER);
	proc->p_reg.retreg =
		do_ipc(proc->p_defer.r1, proc->p_defer.r2, proc->p_defer.r3);
}

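/* Final architecture-specific step of a switch to user space: publish the
 * process pointer at the top of the kernel stack so low-level code can find
 * it, and make sure the interrupt and trace bits in the process' PSW are set
 * up correctly before its context is restored.
 */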
struct proc * arch_finish_switch_to_user(void)
{
	char * stk;
	struct proc * p;

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif
	/* set pointer to the process to run on the stack */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;

	/* make sure IF is on in FLAGS so that interrupts won't be disabled
	 * once p's context is restored.
	 */
	p->p_reg.psw |= IF_MASK;

	/* Set TRACEBIT state properly. */
	if(p->p_misc_flags & MF_STEP)
		p->p_reg.psw |= TRACEBIT;
	else
		p->p_reg.psw &= ~TRACEBIT;

	return p;
}

void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trap_style)
{
	if(isuser) {
		/* Restore user bits of psw from sc, maintain system bits
		 * from proc.
		 */
		state->psw = (state->psw & X86_FLAGS_USER) |
			(p->p_reg.psw & ~X86_FLAGS_USER);
	}

	/* someone wants to totally re-initialize process state */
	assert(sizeof(p->p_reg) == sizeof(*state));
	if(state != &p->p_reg) {
		memcpy(&p->p_reg, state, sizeof(*state));
	}

	/* further code is instructed to not touch the context
	 * any more
	 */
	p->p_misc_flags |= MF_CONTEXT_SET;

	/* on x86 this requires returning using iret (KTS_INT)
	 * so that the full context is restored instead of relying on
	 * the userspace doing it (as it would do on SYSEXIT).
	 * as ESP and EIP are also reset, userspace won't try to
	 * restore bogus context after returning.
	 *
	 * if the process is not blocked, or the kernel will ignore
	 * our trap style, we needn't panic but things will probably
	 * not go well for the process (restored context will be ignored)
	 * and the situation should be debugged.
	 */
	if(!(p->p_rts_flags)) {
		printf("WARNING: setting full context of runnable process\n");
		print_proc(p);
		util_stacktrace();
	}
	if(p->p_seg.p_kern_trap_style == KTS_NONE)
		printf("WARNING: setting full context of out-of-kernel process\n");
	p->p_seg.p_kern_trap_style = trap_style;
}

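/* Return to user space using the mechanism that matches how the process
 * entered the kernel: SYSENTER, SYSCALL, or an interrupt/full-context
 * restore via iret.
 */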
void restore_user_context(struct proc *p)
{
	int trap_style = p->p_seg.p_kern_trap_style;
#if 0
#define TYPES 10
	static int restores[TYPES], n = 0;

	if(trap_style >= 0 && trap_style < TYPES)
		restores[trap_style]++;

	if(!(n++ % 500000)) {
		int t;
		for(t = 0; t < TYPES; t++)
			if(restores[t])
				printf("%d: %d ", t, restores[t]);
		printf("\n");
	}
#endif

	p->p_seg.p_kern_trap_style = KTS_NONE;

	if(trap_style == KTS_SYSENTER) {
		restore_user_context_sysenter(p);
		NOT_REACHABLE;
	}

	if(trap_style == KTS_SYSCALL) {
		restore_user_context_syscall(p);
		NOT_REACHABLE;
	}

	switch(trap_style) {
		case KTS_NONE:
			panic("no entry trap style known");
		case KTS_INT_HARD:
		case KTS_INT_UM:
		case KTS_FULLCONTEXT:
		case KTS_INT_ORIG:
			restore_user_context_int(p);
			NOT_REACHABLE;
		default:
			panic("unknown trap style recorded");
			NOT_REACHABLE;
	}

	NOT_REACHABLE;
}

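/* Translate the pending FPU exception recorded in a signal context into a
 * POSIX floating-point signal code for the signal frame.
 */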
void fpu_sigcontext(struct proc *pr, struct sigframe_sigcontext *fr, struct sigcontext *sc)
{
	int fp_error;

	if (osfxsr_feature) {
		fp_error = sc->sc_fpu_state.xfp_regs.fp_status &
			~sc->sc_fpu_state.xfp_regs.fp_control;
	} else {
		fp_error = sc->sc_fpu_state.fpu_regs.fp_status &
			~sc->sc_fpu_state.fpu_regs.fp_control;
	}

	if (fp_error & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		fr->sf_code = FPE_FLTINV;
	} else if (fp_error & 0x004) {
		fr->sf_code = FPE_FLTDIV; /* Divide by Zero */
	} else if (fp_error & 0x008) {
		fr->sf_code = FPE_FLTOVF; /* Overflow */
	} else if (fp_error & 0x012) {
		fr->sf_code = FPE_FLTUND; /* Denormal, Underflow */
	} else if (fp_error & 0x020) {
		fr->sf_code = FPE_FLTRES; /* Precision */
	} else {
		fr->sf_code = 0; /* XXX - probably should be used for FPE_INTOVF or
				  * FPE_INTDIV */
	}
}

reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }

#if !CONFIG_OXPCIE
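/* Program COM1 for kernel debugging output at the baud rate given in
 * kinfo.serial_debug_baud; if none is set, the BIOS settings are left alone.
 */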
static void ser_init(void)
{
	unsigned char lcr;
	unsigned divisor;

	/* keep BIOS settings if cttybaud is not set */
	if (kinfo.serial_debug_baud <= 0) return;

	/* set DLAB to make baud accessible */
	lcr = LCR_8BIT | LCR_1STOP | LCR_NPAR;
	outb(COM1_LCR, lcr | LCR_DLAB);

	/* set baud rate */
	divisor = UART_BASE_FREQ / kinfo.serial_debug_baud;
	if (divisor < 1) divisor = 1;
	if (divisor > 65535) divisor = 65535;

	outb(COM1_DLL, divisor & 0xff);
	outb(COM1_DLM, (divisor >> 8) & 0xff);

	/* clear DLAB */
	outb(COM1_LCR, lcr);
}
#endif
