/*
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2008 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define _ACPI_WAKECODE

#include "assym.h"
#include <machine/asm.h>
#ifdef HIBERNATE
#include <machine/hibernate_var.h>
#endif /* HIBERNATE */
#include <machine/specialreg.h>
#include <machine/param.h>
#include <machine/segments.h>
#include <dev/acpi/acpivar.h>

#define _ACPI_TRMP_LABEL(a) a = . - _C_LABEL(acpi_real_mode_resume) + ACPI_TRAMPOLINE
#define _ACPI_TRMP_OFFSET(a) a = . - _C_LABEL(acpi_real_mode_resume)
#define _ACPI_TRMP_DATA_LABEL(a) a = . - _C_LABEL(acpi_tramp_data_start) + \
	ACPI_TRAMP_DATA
#define _ACPI_TRMP_DATA_OFFSET(a) a = . - _C_LABEL(acpi_tramp_data_start)
#define _ACPI_RM_CODE_SEG (ACPI_TRAMPOLINE >> 4)
#define _ACPI_RM_DATA_SEG (ACPI_TRAMP_DATA >> 4)

#ifdef HIBERNATE
#define HIBERNATE_STACK_OFFSET	0x0F00
#endif

/*
 * On wakeup, we'll start executing at acpi_real_mode_resume.
 * This is based on the wakeup vector previously stored with
 * ACPI before we went to sleep. ACPI's wakeup vector is a
 * physical address - in our case, it's calculated and mapped
 * by the kernel and stuffed into a low page early in the boot
 * process.
 *
 * We wake up in real mode, at some phys addr based on the ACPI
 * specification (cs = phys >> 4, ip = phys & 0xF). For example,
 * if our phys addr is 0x13000, we'd have cs=0x1300, ip=0.
 *
 * The wakeup code needs to do the following:
 *	1. Reenable the video display
 *	2. Enter 32 bit protected mode
 *	3. Reenable paging
 *	4. Restore saved CPU registers
 */
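/*
 * To make the vector math above concrete: for a wakeup vector at
 * physical 0x13000, the firmware starts us at cs:ip = 0x1300:0x0000,
 * and real mode address translation gives cs * 16 + ip =
 * 0x1300 * 0x10 + 0x0 = 0x13000, the vector itself. The same
 * shift-by-four rule is why _ACPI_RM_CODE_SEG and _ACPI_RM_DATA_SEG
 * above are defined as their pages' physical addresses >> 4.
 */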
	.text
	.code16
	.align 4, 0xcc
	.global _C_LABEL(acpi_real_mode_resume)
	.global _C_LABEL(acpi_protected_mode_resume)
	.global _C_LABEL(acpi_resume_end)
	.global _C_LABEL(acpi_tramp_data_start)
	.global _C_LABEL(acpi_tramp_data_end)
_C_LABEL(acpi_real_mode_resume):
_ACPI_TRMP_OFFSET(acpi_s3_vector_real)
	nop
	cli
	cld

	/*
	 * Set up segment registers for real mode.
	 * We'll only be in real mode for a moment, and we don't want
	 * any real dependencies on data or stack, so we'll just use
	 * the trampoline data segment for data and stack and the code
	 * segment for %es (ie, 64k memory spaces).
	 */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%cs, %ax
	movw	%ax, %es
	lidtl	clean_idt

	/*
	 * Set up stack to grow down from offset 0x0FFE.
	 * We will only be doing a few push/pops and no calls in real
	 * mode, so as long as the real mode code in the segment
	 * plus stack doesn't exceed 0x0FFE (4094) bytes, we'll be ok.
	 */
	movw	$0x0FFE,%sp

	/*
	 * Clear flags
	 */
	pushl	$0
	popfl

	/*
	 * Flush instruction prefetch queue
	 */
	jmp	1f
1:	jmp	1f
1:

	/*
	 * We're about to enter protected mode, so we need a GDT for that.
	 * Set up a temporary GDT describing 2 segments, one for code
	 * extending from 0x00000000-0xffffffff and one for data
	 * with the same range. This GDT will only be in use for a short
	 * time, until we restore the saved GDT that we had when we went
	 * to sleep (although on i386, the saved GDT will most likely
	 * represent something similar based on machine/segments.h).
	 */
	data32 addr32 lgdt tmp_gdt

	/*
	 * Enable protected mode by setting the PE bit in CR0
	 */
	mov	%cr0,%eax
	orl	$(CR0_PE),%eax
	mov	%eax,%cr0

	/*
	 * Force the CPU into protected mode by making an intersegment
	 * jump (to ourselves, just a few lines down from here). We rely
	 * on the kernel to have fixed up the jump target address
	 * beforehand.
	 */
	ljmpl	$0x8, $acpi_protected_mode_trampoline

	.code32
	.align 16, 0xcc
_ACPI_TRMP_LABEL(acpi_protected_mode_trampoline)
_C_LABEL(acpi_protected_mode_resume):
	nop

	/*
	 * We're in protected mode now, without paging enabled.
	 *
	 * Set up segment selectors for protected mode.
	 * We've already set up our cs via the intersegment jump earlier,
	 * but we need to set ds,es,fs,gs,ss to all point to the
	 * 4GB flat data segment we defined earlier.
	 */
	movw	$GSEL(GDATA_SEL,SEL_KPL),%ax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movw	%ax,%fs
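	/*
	 * For reference: a selector value is (index << 3) | TI | RPL.
	 * The $0x8 in the ljmpl above is index 1 with TI=0 and RPL=0,
	 * ie the code descriptor in tmp_gdt. GSEL(GDATA_SEL,SEL_KPL)
	 * is built the same way in machine/segments.h; assuming
	 * GDATA_SEL is 2, it evaluates to 0x10, which indexes the flat
	 * data descriptor in tmp_gdt just as it will index the kernel
	 * data descriptor once the saved GDT is restored.
	 */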
	/*
	 * Reset ESP based on protected mode. We can do this here
	 * because we haven't put anything on the stack via a
	 * call or push that we haven't cleaned up already.
	 */
	addl	$(ACPI_TRAMP_DATA), %esp

	/*
	 * Reset our page size extension (via restoring cr4) to what
	 * it was before we suspended. If we don't do this, cr4 might
	 * contain garbage in the PSE bit, leading to pages being
	 * interpreted as the wrong size. CR4 was added in i586, so
	 * there is an implicit assumption here that this code will
	 * execute on i586 or later.
	 */
	mov	acpi_saved_cr4,%eax
	mov	%eax,%cr4

	testl	$CR4_PAE, %eax
	jz	1f

	movl	$MSR_EFER, %ecx
	rdmsr
	orl	$EFER_NXE, %eax
	wrmsr

1:
	/*
	 * Re-enable paging, using the CR3 we stored before suspend
	 * as our new page table base location. Restore CR0 after
	 * that.
	 */
	movl	acpi_saved_cr3,%eax
	movl	%eax,%cr3
	movl	acpi_saved_cr0, %eax
	movl	%eax, %cr0

	/*
	 * Flush the prefetch queue in order to enforce usage
	 * of the new (old) page tables we just re-enabled.
	 */
	jmp	1f
1:	jmp	1f
1:
	nop

	/*
	 * Restore CPU segment descriptor registers
	 */
	lgdt	acpi_saved_gdt
	lidt	acpi_saved_idt
	lldt	acpi_saved_ldt

	mov	acpi_saved_cr2,%eax
	mov	%eax,%cr2

	/*
	 * It is highly likely that the selectors we already loaded into
	 * these registers are already accurate, but we reload them
	 * again, for consistency.
	 */
	movw	acpi_saved_es,%ax
	movw	%ax,%es
	movw	acpi_saved_fs,%ax
	movw	%ax,%fs
	movw	acpi_saved_gs,%ax
	movw	%ax,%gs
	movw	acpi_saved_ss,%ax
	movw	%ax,%ss
	movw	acpi_saved_ds,%ax
	movw	%ax,%ds

	/*
	 * Shortly, we'll restore the TSS for the task that was running
	 * immediately before suspend occurred. Since that task was the
	 * running task, its TSS busy flag will have been set. We need
	 * to clear that bit (since we're effectively "restarting" the OS)
	 * in order to convince the processor that the task is no longer
	 * running (which is true, now). If we don't do this, when the
	 * OS resumes and resumes this task, it will assume we're trying
	 * to recurse into an already active task, which would cause
	 * a GP violation (and probably a crash).
	 *
	 * We accomplish this by changing the TSS descriptor's type field
	 * from BUSY (0x0B) to AVAILABLE (0x09), keeping the high 4 bits
	 * of the access byte intact.
	 */
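	/*
	 * A worked example of the masking below: the access byte (at
	 * offset 5 of the 8-byte descriptor) of a busy 32-bit TSS is
	 * typically 0x8B - present, DPL 0, system, type 1011b. Masking
	 * with $0xF9 clears bits 1 and 2; bit 2 is already zero for a
	 * TSS, so the net effect is 0x8B & 0xF9 = 0x89, ie type 1001b,
	 * an available 32-bit TSS, with the P/DPL/S nibble untouched.
	 */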
	movl	acpi_saved_gdt+2,%ebx
	xorl	%ecx, %ecx
	movw	acpi_saved_tr,%cx
	leal	(%ebx,%ecx),%eax
	andb	$0xF9,5(%eax)

	ltr	acpi_saved_tr

	/*
	 * Everything is almost reset back to the way it was immediately
	 * before suspend. There are a few more registers to restore, and
	 * after that, jump back to the OS. There are still some things
	 * to do there, like re-enabling interrupts, resuming devices,
	 * APICs, etc.
	 */
	movl	acpi_saved_ebx, %ebx
	movl	acpi_saved_ecx, %ecx
	movl	acpi_saved_edx, %edx
	movl	acpi_saved_ebp, %ebp
	movl	acpi_saved_esi, %esi
	movl	acpi_saved_edi, %edi
	movl	acpi_saved_esp, %esp
	push	acpi_saved_fl
	popfl

	/* Poke CR3 one more time. Might not be necessary. */
	movl	acpi_saved_cr3,%eax
	movl	%eax,%cr3

	/*
	 * Return to the OS. We previously saved the resume address
	 * in acpi_saved_ret (via a call to acpi_savecpu before we
	 * went to sleep).
	 */
	xorl	%eax, %eax
	jmp	*acpi_saved_ret

#ifdef HIBERNATE
	/*
	 * hibernate_resume_machdep drops to real mode and
	 * restarts the OS using the saved S3 resume vector
	 */
	.code32
NENTRY(hibernate_resume_machdep)
	cli
	/* Jump to the identity mapped version of ourself */
	mov	$hibernate_resume_vector_2, %eax
	jmp	*%eax
_ACPI_TRMP_LABEL(hibernate_resume_vector_2)

	/* Get out of 32 bit CS */
	lgdt	gdt_16
	ljmp	$0x8, $hibernate_resume_vector_3

_ACPI_TRMP_LABEL(hibernate_resume_vector_3)
	.code16
	movl	%cr0, %eax
	/* Disable CR0.PG - no paging */
	andl	$(~CR0_PG), %eax
	/* Disable CR0.PE - real mode */
	andl	$(~CR0_PE), %eax
	movl	%eax, %cr0

	/* Flush TLB */
	xorl	%eax, %eax
	movl	%eax, %cr3

	/* Set up real mode segment selectors */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movl	$0x0FFE, %esp
	lidtl	clean_idt

	/* Jump to the S3 resume vector */
	ljmp	$(_ACPI_RM_CODE_SEG), $acpi_s3_vector_real

	.code32
	/* Switch to the hibernate resume pagetable */
NENTRY(hibernate_activate_resume_pt_machdep)
	/* Enable large pages */
	movl	%cr4, %eax
	orl	$(CR4_PSE), %eax

	/* Disable global pages */
	andl	$(~CR4_PGE), %eax
	movl	%eax, %cr4

	/*
	 * Switch to the hibernate resume pagetable if we're running
	 * in non-PAE mode. If we're running in PAE mode, this will
	 * switch to the PTPDEs we stashed into the hibernate resume
	 * pagetable, but continue to use the normal pagetables until we
	 * disable PAE below.
	 */
	movl	$HIBERNATE_PD_PAGE, %eax
	orl	$0xfe0, %eax
	movl	%eax, %cr3

	/* Disable PAE */
	movl	%cr4, %eax
	andl	$(~CR4_PAE), %eax
	movl	%eax, %cr4

	wbinvd
	movl	$HIBERNATE_PD_PAGE, %eax
	movl	%eax, %cr3
	jmp	1f

1:	nop
	ret

	/*
	 * Switch to the private resume-time hibernate stack
	 */
NENTRY(hibernate_switch_stack_machdep)
	/* Save our return address at the top of the new stack */
	movl	(%esp), %eax
	movl	%eax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET
	movl	$(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %eax
	movl	%eax, %esp

	/* On our own stack from here onward */
	ret

NENTRY(hibernate_flush)
	invlpg	HIBERNATE_INFLATE_PAGE
	ret
#endif /* HIBERNATE */

	/*
	 * End of resume code (code copied to ACPI_TRAMPOLINE)
	 */
_C_LABEL(acpi_resume_end):

	/*
	 * The initial copy of this data gets placed in .rodata; the
	 * kernel makes an RW copy of it in the tramp data page.
	 */
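	/*
	 * For reference, tmp_gdt and gdt_16 below are lgdt operands
	 * (pseudo-descriptors): a 16-bit table limit followed by a
	 * 32-bit linear base address. Each 8-byte entry packs
	 * limit[15:0], base[15:0], base[23:16], an access byte, a
	 * flags nibble plus limit[19:16], and base[31:24]. The 32-bit
	 * code entry ".word 0xffff, 0 / .byte 0, 0x9f, 0xcf, 0" thus
	 * encodes base 0 and limit 0xfffff with G=1 (page granularity)
	 * and D=1 (32-bit) in 0xcf, ie a flat 4GB segment, while
	 * gdt_16 uses 0x8f (G=1, D=0) to get 16-bit segments instead.
	 */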
	.section .rodata
_C_LABEL(acpi_tramp_data_start):
_ACPI_TRMP_DATA_OFFSET(tmp_gdt)
	.word	tmp_gdt_end - tmp_gdtable
	.long	tmp_gdtable

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(tmp_gdtable)
	/*
	 * null
	 */
	.word	0, 0
	.byte	0, 0, 0, 0
	/*
	 * Code
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Code
	 * Segment Type: CRA
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	/*
	 * Data
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Data
	 * Segment Type: W
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0
_ACPI_TRMP_DATA_LABEL(tmp_gdt_end)

	.align 8, 0xcc
_ACPI_TRMP_DATA_OFFSET(clean_idt)
	.word	0xffff
	.long	0
	.word	0

	/*
	 * gdt_16 is the gdt used when returning to real mode for bios
	 * reads/writes (sets up a 16 bit segment)
	 */
	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(gdt_16)
	.word	gdt_16_end - gdt_16_table
	.long	gdt_16_table

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(gdt_16_table)
	/*
	 * null
	 */
	.word	0, 0
	.byte	0, 0, 0, 0
	/*
	 * Code
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Code
	 * Segment Type: CRA
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: False
	 */
	.word	0xffff, 0
	.byte	0, 0x9f, 0x8f, 0

	/*
	 * Data
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Data
	 * Segment Type: W
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: False
	 */
	.word	0xffff, 0
	.byte	0, 0x93, 0x8f, 0

_ACPI_TRMP_DATA_LABEL(gdt_16_end)

	.align 4, 0xcc
_ACPI_TRMP_DATA_LABEL(acpi_saved_ebx)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_ecx)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_edx)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_ebp)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_esi)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_edi)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_esp)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_fl)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_cr0)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_cr2)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_cr3)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_cr4)
	.long	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_ret)
	.long	0

	.align 16, 0xcc
_ACPI_TRMP_DATA_LABEL(acpi_saved_idt)
	.space	6

	.align 16, 0xcc
_ACPI_TRMP_DATA_LABEL(acpi_saved_gdt)
	.space	6

	.align 16, 0xcc
_ACPI_TRMP_DATA_LABEL(acpi_saved_ldt)
	.short	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_cs)
	.short	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_ds)
	.short	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_es)
	.short	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_fs)
	.short	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_gs)
	.short	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_ss)
	.short	0
_ACPI_TRMP_DATA_LABEL(acpi_saved_tr)
	.short	0

_C_LABEL(acpi_tramp_data_end):

	/*
	 * acpi_savecpu saves the processor's registers and flags
	 * for use during the ACPI suspend/resume process.
	 */
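	/*
	 * acpi_savecpu behaves like a minimal setjmp: it captures the
	 * caller's return address from (%esp) into acpi_saved_ret and
	 * returns 1. When the wakeup path above later executes
	 * "xorl %eax, %eax; jmp *acpi_saved_ret", the same call site
	 * "returns" a second time with %eax == 0, letting the caller
	 * distinguish the save path from the resume path.
	 */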
	.code32
NENTRY(acpi_savecpu)
	movl	(%esp), %eax
	movl	%eax, acpi_saved_ret

	movw	%cs, acpi_saved_cs
	movw	%ds, acpi_saved_ds
	movw	%es, acpi_saved_es
	movw	%fs, acpi_saved_fs
	movw	%gs, acpi_saved_gs
	movw	%ss, acpi_saved_ss

	movl	%ebx, acpi_saved_ebx
	movl	%ecx, acpi_saved_ecx
	movl	%edx, acpi_saved_edx
	movl	%ebp, acpi_saved_ebp
	movl	%esi, acpi_saved_esi
	movl	%edi, acpi_saved_edi
	movl	%esp, acpi_saved_esp

	pushfl
	popl	acpi_saved_fl

	movl	%cr0, %eax
	movl	%eax, acpi_saved_cr0
	movl	%cr2, %eax
	movl	%eax, acpi_saved_cr2
	movl	%cr3, %eax
	movl	%eax, acpi_saved_cr3
	movl	%cr4, %eax
	movl	%eax, acpi_saved_cr4

	sgdt	acpi_saved_gdt
	sidt	acpi_saved_idt
	sldt	acpi_saved_ldt
	str	acpi_saved_tr

	movl	$1, %eax
	ret