/*	$OpenBSD: subr_hibernate.c,v 1.53 2013/03/28 16:58:45 deraadt Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
 * Copyright (c) 2011 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/hibernate.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/tree.h>
#include <sys/systm.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <machine/hibernate.h>

/*
 * Hibernate piglet layout information
 *
 * The piglet is a scratch area of memory allocated by the suspending kernel.
 * Its phys and virt addrs are recorded in the signature block. The piglet is
 * used to guarantee an unused area of memory that can be used by the resuming
 * kernel for various things. The piglet is excluded during unpack operations.
 * The piglet size is presently 3*HIBERNATE_CHUNK_SIZE (typically 3*4MB).
 *
 * Offset from piglet_base	Purpose
 * ----------------------------------------------------------------------------
 * 0				I/O page used during resume
 * 1*PAGE_SIZE			I/O page used during hibernate suspend
 * 2*PAGE_SIZE			unused
 * 3*PAGE_SIZE			copy page used during hibernate suspend
 * 4*PAGE_SIZE			final chunk ordering list (8 pages)
 * 12*PAGE_SIZE			piglet chunk ordering list (8 pages)
 * 20*PAGE_SIZE			temp chunk ordering list (8 pages)
 * 28*PAGE_SIZE			start of hiballoc area
 * 108*PAGE_SIZE		end of hiballoc area (80 pages)
 * ...				unused
 * HIBERNATE_CHUNK_SIZE		start of hibernate chunk table
 * 2*HIBERNATE_CHUNK_SIZE	bounce area for chunks being unpacked
 * 3*HIBERNATE_CHUNK_SIZE	end of piglet
 */

/* Temporary vaddr ranges used during hibernate */
vaddr_t hibernate_temp_page;
vaddr_t hibernate_copy_page;

/* Hibernate info as read from disk during resume */
union hibernate_info disk_hiber_info;
paddr_t global_pig_start;
vaddr_t global_piglet_va;

void hibernate_copy_chunk_to_piglet(paddr_t, vaddr_t, size_t);

/*
 * Hib alloc enforced alignment.
 */
#define HIB_ALIGN		8 /* bytes alignment */

/*
 * sizeof builtin operation, but with alignment constraint.
 */
#define HIB_SIZEOF(_type)	roundup(sizeof(_type), HIB_ALIGN)

struct hiballoc_entry {
	size_t			hibe_use;
	size_t			hibe_space;
	RB_ENTRY(hiballoc_entry) hibe_entry;
};

/*
 * Compare hiballoc entries based on the address they manage.
 *
 * Since the address is fixed, relative to struct hiballoc_entry,
 * we just compare the hiballoc_entry pointers.
 */
static __inline int
hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r)
{
	return l < r ? -1 : (l > r);
}
RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)

/*
 * Given a hiballoc entry, return the address it manages.
 */
static __inline void *
hib_entry_to_addr(struct hiballoc_entry *entry)
{
	caddr_t addr;

	addr = (caddr_t)entry;
	addr += HIB_SIZEOF(struct hiballoc_entry);
	return addr;
}

/*
 * Given an address, find the hiballoc entry that corresponds.
 */
static __inline struct hiballoc_entry*
hib_addr_to_entry(void *addr_param)
{
	caddr_t addr;

	addr = (caddr_t)addr_param;
	addr -= HIB_SIZEOF(struct hiballoc_entry);
	return (struct hiballoc_entry*)addr;
}

RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)

/*
 * Allocate memory from the arena.
 *
 * Returns NULL if no memory is available.
 */
void *
hib_alloc(struct hiballoc_arena *arena, size_t alloc_sz)
{
	struct hiballoc_entry *entry, *new_entry;
	size_t find_sz;

	/*
	 * Enforce alignment of HIB_ALIGN bytes.
	 *
	 * Note that, because the entry is put in front of the allocation,
	 * 0-byte allocations are guaranteed a unique address.
	 */
	alloc_sz = roundup(alloc_sz, HIB_ALIGN);

	/*
	 * Find an entry with hibe_space >= find_sz.
	 *
	 * If the root node is not large enough, we switch to tree traversal.
	 * Because all entries are made at the bottom of the free space,
	 * traversal from the end has a slightly better chance of yielding
	 * a sufficiently large space.
	 */
	find_sz = alloc_sz + HIB_SIZEOF(struct hiballoc_entry);
	entry = RB_ROOT(&arena->hib_addrs);
	if (entry != NULL && entry->hibe_space < find_sz) {
		RB_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
			if (entry->hibe_space >= find_sz)
				break;
		}
	}

	/*
	 * Insufficient or too fragmented memory.
	 */
	if (entry == NULL)
		return NULL;

	/*
	 * Create new entry in allocated space.
	 */
	new_entry = (struct hiballoc_entry*)(
	    (caddr_t)hib_entry_to_addr(entry) + entry->hibe_use);
	new_entry->hibe_space = entry->hibe_space - find_sz;
	new_entry->hibe_use = alloc_sz;

	/*
	 * Insert entry.
	 */
	if (RB_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
		panic("hib_alloc: insert failure");
	entry->hibe_space = 0;

	/* Return address managed by entry. */
	return hib_entry_to_addr(new_entry);
}

/*
 * Free a pointer previously allocated from this arena.
 *
 * If addr is NULL, this will be silently accepted.
 */
void
hib_free(struct hiballoc_arena *arena, void *addr)
{
	struct hiballoc_entry *entry, *prev;

	if (addr == NULL)
		return;

	/*
	 * Derive entry from addr and check it is really in this arena.
	 */
	entry = hib_addr_to_entry(addr);
	if (RB_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
		panic("hib_free: freed item %p not in hib arena", addr);

	/*
	 * Give the space in entry to its predecessor.
	 *
	 * If entry has no predecessor, change its used space into free space
	 * instead.
	 */
	prev = RB_PREV(hiballoc_addr, &arena->hib_addrs, entry);
	if (prev != NULL &&
	    (void *)((caddr_t)prev + HIB_SIZEOF(struct hiballoc_entry) +
	    prev->hibe_use + prev->hibe_space) == entry) {
		/* Merge entry. */
		RB_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
		prev->hibe_space += HIB_SIZEOF(struct hiballoc_entry) +
		    entry->hibe_use + entry->hibe_space;
	} else {
		/* Flip used memory to free space. */
		entry->hibe_space += entry->hibe_use;
		entry->hibe_use = 0;
	}
}
/*
 * Initialize hiballoc.
 *
 * The allocator will manage memory at ptr, which is len bytes.
 */
int
hiballoc_init(struct hiballoc_arena *arena, void *p_ptr, size_t p_len)
{
	struct hiballoc_entry *entry;
	caddr_t ptr;
	size_t len;

	RB_INIT(&arena->hib_addrs);

	/*
	 * Hib allocator enforces HIB_ALIGN alignment.
	 * Fixup ptr and len.
	 */
	ptr = (caddr_t)roundup((vaddr_t)p_ptr, HIB_ALIGN);
	len = p_len - ((size_t)ptr - (size_t)p_ptr);
	len &= ~((size_t)HIB_ALIGN - 1);

	/*
	 * Insufficient memory to be able to allocate and also do bookkeeping.
	 */
	if (len <= HIB_SIZEOF(struct hiballoc_entry))
		return ENOMEM;

	/*
	 * Create entry describing space.
	 */
	entry = (struct hiballoc_entry*)ptr;
	entry->hibe_use = 0;
	entry->hibe_space = len - HIB_SIZEOF(struct hiballoc_entry);
	RB_INSERT(hiballoc_addr, &arena->hib_addrs, entry);

	return 0;
}
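/*
 * Example: minimal use of the hiballoc arena API (illustrative sketch
 * only, not compiled in). The zlib glue later in this file drives the
 * arena exactly this way, with the backing memory taken from the piglet;
 * the "scratch" buffer below is a hypothetical stand-in for that memory.
 *
 *	static char scratch[16 * 1024];
 *	struct hiballoc_arena arena;
 *	void *p;
 *
 *	if (hiballoc_init(&arena, scratch, sizeof(scratch)) == 0) {
 *		p = hib_alloc(&arena, 128);	// NULL when arena is full
 *		hib_free(&arena, p);		// NULL is silently accepted
 *	}
 */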
/*
 * Zero all free memory.
 */
void
uvm_pmr_zero_everything(void)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pg;
	int i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Zero single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			uvm_pagezero(pg);
			atomic_setbits_int(&pg->pg_flags, PG_ZERO);
			uvmexp.zeropages++;
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Zero multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++) {
				uvm_pagezero(&pg[i]);
				atomic_setbits_int(&pg[i].pg_flags, PG_ZERO);
				uvmexp.zeropages++;
			}
			uvm_pmr_insert(pmr, pg, 0);
		}
	}
	uvm_unlock_fpageq();
}

/*
 * Mark all memory as dirty.
 *
 * Used to inform the system that the clean memory isn't clean for some
 * reason, for example because we just came back from hibernate.
 */
void
uvm_pmr_dirty_everything(void)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pg;
	int i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Dirty single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Dirty multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++)
				atomic_clearbits_int(&pg[i].pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}
	}

	uvmexp.zeropages = 0;
	uvm_unlock_fpageq();
}

/*
 * Allocate the highest address that can hold sz.
 *
 * sz in bytes.
 */
int
uvm_pmr_alloc_pig(paddr_t *addr, psize_t sz)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pig_pg, *pg;

	/*
	 * Convert sz to pages, since that is what pmemrange uses internally.
	 */
	sz = atop(round_page(sz));

	uvm_lock_fpageq();

	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		RB_FOREACH_REVERSE(pig_pg, uvm_pmr_addr, &pmr->addr) {
			if (pig_pg->fpgsz >= sz) {
				goto found;
			}
		}
	}

	/*
	 * Allocation failure.
	 */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/* Remove page from freelist. */
	uvm_pmr_remove_size(pmr, pig_pg);
	pig_pg->fpgsz -= sz;
	pg = pig_pg + pig_pg->fpgsz;
	if (pig_pg->fpgsz == 0)
		uvm_pmr_remove_addr(pmr, pig_pg);
	else
		uvm_pmr_insert_size(pmr, pig_pg);

	uvmexp.free -= sz;
	*addr = VM_PAGE_TO_PHYS(pg);

	/*
	 * Update pg flags.
	 *
	 * Note that we trash the sz argument now.
	 */
	while (sz > 0) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages -= sz;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;

		/*
		 * Next.
		 */
		pg++;
		sz--;
	}

	/* Return. */
	uvm_unlock_fpageq();
	return 0;
}
/*
 * Allocate a piglet area.
 *
 * The piglet is allocated as low in physical memory as possible.
 * Piglets are aligned.
 *
 * sz and align in bytes.
 *
 * The call will sleep for the pagedaemon to attempt to free memory.
 * The pagedaemon may decide it's not possible to free enough memory, causing
 * the allocation to fail.
 */
int
uvm_pmr_alloc_piglet(vaddr_t *va, paddr_t *pa, vsize_t sz, paddr_t align)
{
	paddr_t pg_addr, piglet_addr;
	struct uvm_pmemrange *pmr;
	struct vm_page *pig_pg, *pg;
	struct pglist pageq;
	int pdaemon_woken;
	vaddr_t piglet_va;

	KASSERT((align & (align - 1)) == 0);
	pdaemon_woken = 0; /* Didn't wake the pagedaemon. */

	/*
	 * Fixup arguments: align must be at least PAGE_SIZE,
	 * sz will be converted to pagecount, since that is what
	 * pmemrange uses internally.
	 */
	if (align < PAGE_SIZE)
		align = PAGE_SIZE;
	sz = round_page(sz);

	uvm_lock_fpageq();

	TAILQ_FOREACH_REVERSE(pmr, &uvm.pmr_control.use, uvm_pmemrange_use,
	    pmr_use) {
retry:
		/*
		 * Search for a range with enough space.
		 * Use the address tree, to ensure the range is as low as
		 * possible.
		 */
		RB_FOREACH(pig_pg, uvm_pmr_addr, &pmr->addr) {
			pg_addr = VM_PAGE_TO_PHYS(pig_pg);
			piglet_addr = (pg_addr + (align - 1)) & ~(align - 1);

			if (atop(pg_addr) + pig_pg->fpgsz >=
			    atop(piglet_addr) + atop(sz))
				goto found;
		}
	}

	/*
	 * Try to coerce the pagedaemon into freeing memory
	 * for the piglet.
	 *
	 * pdaemon_woken is set to prevent the code from
	 * falling into an endless loop.
	 */
	if (!pdaemon_woken) {
		pdaemon_woken = 1;
		if (uvm_wait_pla(ptoa(pmr->low), ptoa(pmr->high) - 1,
		    sz, UVM_PLA_FAILOK) == 0)
			goto retry;
	}

	/* Return failure. */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/*
	 * Extract piglet from pigpen.
	 */
	TAILQ_INIT(&pageq);
	uvm_pmr_extract_range(pmr, pig_pg,
	    atop(piglet_addr), atop(piglet_addr) + atop(sz), &pageq);

	*pa = piglet_addr;
	uvmexp.free -= atop(sz);

	/*
	 * Update pg flags.
	 *
	 * Note that we trash the sz argument now.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages--;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;
	}

	uvm_unlock_fpageq();

	/*
	 * Now allocate a va.
	 * Use direct mappings for the pages.
	 */

	piglet_va = *va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, &kd_waitok);
	if (!piglet_va) {
		uvm_pglistfree(&pageq);
		return ENOMEM;
	}

	/*
	 * Map piglet to va.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		pmap_kenter_pa(piglet_va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
		piglet_va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
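/*
 * Example: allocating and releasing a piglet (illustrative sketch only).
 * This mirrors the calls made by get_hibernate_info() and hibernate_free()
 * below; the 3*HIBERNATE_CHUNK_SIZE size and HIBERNATE_CHUNK_SIZE
 * alignment match the piglet layout described at the top of this file.
 *
 *	vaddr_t va;
 *	paddr_t pa;
 *
 *	if (uvm_pmr_alloc_piglet(&va, &pa, HIBERNATE_CHUNK_SIZE * 3,
 *	    HIBERNATE_CHUNK_SIZE) == 0) {
 *		// use [va, va + 3*HIBERNATE_CHUNK_SIZE) ...
 *		uvm_pmr_free_piglet(va, HIBERNATE_CHUNK_SIZE * 3);
 *	}
 */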
/*
 * Free a piglet area.
 */
void
uvm_pmr_free_piglet(vaddr_t va, vsize_t sz)
{
	paddr_t pa;
	struct vm_page *pg;

	/*
	 * Fix parameters.
	 */
	sz = round_page(sz);

	/*
	 * Find the first page in piglet.
	 * Since piglets are contiguous, the first pg is all we need.
	 */
	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("uvm_pmr_free_piglet: piglet 0x%lx has no pages", va);
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		panic("uvm_pmr_free_piglet: unmanaged page 0x%lx", pa);

	/*
	 * Unmap.
	 */
	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());

	/*
	 * Free the physical and virtual memory.
	 */
	uvm_pmr_freepages(pg, atop(sz));
	km_free((void *)va, sz, &kv_any, &kp_none);
}

/*
 * Physmem RLE compression support.
 *
 * Given a physical page address, return the number of pages starting at the
 * address that are free. Clamps to the number of pages in
 * HIBERNATE_CHUNK_SIZE. Returns 0 if the page at addr is not free.
 */
int
uvm_page_rle(paddr_t addr)
{
	struct vm_page *pg, *pg_end;
	struct vm_physseg *vmp;
	int pseg_idx, off_idx;

	pseg_idx = vm_physseg_find(atop(addr), &off_idx);
	if (pseg_idx == -1)
		return 0;

	vmp = &vm_physmem[pseg_idx];
	pg = &vmp->pgs[off_idx];
	if (!(pg->pg_flags & PQ_FREE))
		return 0;

	/*
	 * Search for the first non-free page after pg.
	 * Note that the page may not be the first page in a free pmemrange,
	 * therefore pg->fpgsz cannot be used.
	 */
	for (pg_end = pg; pg_end <= vmp->lastpg &&
	    (pg_end->pg_flags & PQ_FREE) == PQ_FREE; pg_end++)
		;
	return min((pg_end - pg), HIBERNATE_CHUNK_SIZE/PAGE_SIZE);
}
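/*
 * Worked example (illustrative, assuming 4KB pages and the typical 4MB
 * HIBERNATE_CHUNK_SIZE): uvm_page_rle() then returns a value in
 * [0, 1024]. A return of 0 means the page at addr is in use and must be
 * written to the image; a return of N means the N pages starting at addr
 * are free and can be represented by a single RLE code rather than N
 * pages of data.
 */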
/*
 * Fills out the hibernate_info union pointed to by hiber_info
 * with information about this machine (swap signature block
 * offsets, number of memory ranges, kernel in use, etc)
 */
int
get_hibernate_info(union hibernate_info *hiber_info, int suspend)
{
	int chunktable_size;
	struct disklabel dl;
	char err_string[128], *dl_ret;

	/* Determine I/O function to use */
	hiber_info->io_func = get_hibernate_io_function();
	if (hiber_info->io_func == NULL)
		return (1);

	/* Calculate hibernate device */
	hiber_info->device = swdevt[0].sw_dev;

	/* Read disklabel (used to calculate signature and image offsets) */
	dl_ret = disk_readlabel(&dl, hiber_info->device, err_string, 128);

	if (dl_ret) {
		printf("Hibernate error reading disklabel: %s\n", dl_ret);
		return (1);
	}

	/* Make sure we have a swap partition. */
	if (dl.d_partitions[1].p_fstype != FS_SWAP ||
	    dl.d_partitions[1].p_size == 0)
		return (1);

	hiber_info->secsize = dl.d_secsize;

	/* Make sure the signature can fit in one block */
	KASSERT(sizeof(union hibernate_info) <= hiber_info->secsize);

	/* Calculate swap offset from start of disk */
	hiber_info->swap_offset = dl.d_partitions[1].p_offset;

	/* Calculate signature block location */
	hiber_info->sig_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    sizeof(union hibernate_info)/hiber_info->secsize;

	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	/* Stash kernel version information */
	bzero(&hiber_info->kernel_version, 128);
	bcopy(version, &hiber_info->kernel_version,
	    min(strlen(version), sizeof(hiber_info->kernel_version)-1));

	if (suspend) {
		/* Allocate piglet region */
		if (uvm_pmr_alloc_piglet(&hiber_info->piglet_va,
		    &hiber_info->piglet_pa, HIBERNATE_CHUNK_SIZE*3,
		    HIBERNATE_CHUNK_SIZE)) {
			printf("Hibernate failed to allocate the piglet\n");
			return (1);
		}
		hiber_info->io_page = (void *)hiber_info->piglet_va;

		/*
		 * Initialization of the hibernate IO function for drivers
		 * that need to do prep work (such as allocating memory or
		 * setting up data structures that cannot safely be done
		 * during suspend without causing side effects). There is
		 * a matching HIB_DONE call performed after the write is
		 * completed.
		 */
		if (hiber_info->io_func(hiber_info->device, 0,
		    (vaddr_t)NULL, 0, HIB_INIT, hiber_info->io_page))
			goto fail;

	} else {
		/*
		 * Resuming kernels use a regular I/O page since we won't
		 * have access to the suspended kernel's piglet VA at this
		 * point. No need to free this I/O page as it will vanish
		 * as part of the resume.
		 */
		hiber_info->io_page = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
		if (!hiber_info->io_page)
			return (1);
	}

	if (get_hibernate_info_md(hiber_info))
		goto fail;

	/* Calculate memory image location in swap */
	hiber_info->image_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    (hiber_info->image_size / hiber_info->secsize) -
	    sizeof(union hibernate_info)/hiber_info->secsize -
	    chunktable_size;

	return (0);
fail:
	if (suspend)
		uvm_pmr_free_piglet(hiber_info->piglet_va,
		    HIBERNATE_CHUNK_SIZE * 3);

	return (1);
}
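/*
 * Worked example (illustrative, assuming a 512-byte sector size): for a
 * swap partition of S sectors, the calculations above place the
 * signature block in the last sizeof(union hibernate_info)/512 sectors,
 * the chunk table in the HIBERNATE_CHUNK_TABLE_SIZE/512 sectors before
 * it, and the compressed image in the image_size/512 sectors before
 * that:
 *
 *	start of swap ... [ image | chunk table | signature ] end of swap
 */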
/*
 * Allocate nitems*size bytes from the hiballoc area presently in use
 */
void *
hibernate_zlib_alloc(void *unused, int nitems, int size)
{
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	return hib_alloc(&hibernate_state->hiballoc_arena, nitems*size);
}

/*
 * Free the memory pointed to by addr in the hiballoc area presently in
 * use
 */
void
hibernate_zlib_free(void *unused, void *addr)
{
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hib_free(&hibernate_state->hiballoc_arena, addr);
}

/*
 * Gets the next RLE value from the image stream
 */
int
hibernate_get_next_rle(void)
{
	int rle, i;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Read RLE code */
	hibernate_state->hib_stream.next_out = (char *)&rle;
	hibernate_state->hib_stream.avail_out = sizeof(rle);

	i = inflate(&hibernate_state->hib_stream, Z_FULL_FLUSH);
	if (i != Z_OK && i != Z_STREAM_END) {
		/*
		 * XXX - this will likely reboot/hang most machines
		 * since the console output buffer will be unmapped,
		 * but there's not much else we can do here.
		 */
		panic("inflate rle error");
	}

	/* Sanity check what RLE value we got */
	if (rle > HIBERNATE_CHUNK_SIZE/PAGE_SIZE || rle < 0)
		panic("invalid RLE code");

	if (i == Z_STREAM_END)
		rle = -1;

	return rle;
}
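/*
 * Hypothetical consumer sketch (illustrative only; the actual consumer
 * of these RLE codes lives in machine-dependent code, not this file).
 * An unpack loop could alternate RLE codes with page data along these
 * lines, stopping when -1 signals end of stream:
 *
 *	while ((rle = hibernate_get_next_rle()) >= 0) {
 *		if (rle > 0)
 *			dest += rle * PAGE_SIZE;	// pages were free
 *		else
 *			dest += one inflated page of data;
 *	}
 */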
/*
 * Inflate next page of data from the image stream
 */
int
hibernate_inflate_page(void)
{
	struct hibernate_zlib_state *hibernate_state;
	int i;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Set up the stream for inflate */
	hibernate_state->hib_stream.next_out = (char *)HIBERNATE_INFLATE_PAGE;
	hibernate_state->hib_stream.avail_out = PAGE_SIZE;

	/* Process next block of data */
	i = inflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH);
	if (i != Z_OK && i != Z_STREAM_END) {
		/*
		 * XXX - this will likely reboot/hang most machines
		 * since the console output buffer will be unmapped,
		 * but there's not much else we can do here.
		 */
		panic("inflate error");
	}

	/* We should always have extracted a full page ... */
	if (hibernate_state->hib_stream.avail_out != 0) {
		/*
		 * XXX - this will likely reboot/hang most machines
		 * since the console output buffer will be unmapped,
		 * but there's not much else we can do here.
		 */
		panic("incomplete page");
	}

	return (i == Z_STREAM_END);
}

/*
 * Inflate size bytes from src into dest, skipping any pages in
 * [dest..dest+size] that are special (see hibernate_inflate_skip)
 *
 * This function executes while using the resume-time stack
 * and pmap, and therefore cannot use ddb/printf/etc. Doing so
 * will likely hang or reset the machine since the console output buffer
 * will be unmapped.
 */
void
hibernate_inflate_region(union hibernate_info *hiber_info, paddr_t dest,
    paddr_t src, size_t size)
{
	int end_stream = 0;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hibernate_state->hib_stream.next_in = (char *)src;
	hibernate_state->hib_stream.avail_in = size;

	do {
		/* Flush cache and TLB */
		hibernate_flush();

		/*
		 * Is this a special page? If yes, redirect the
		 * inflate output to a scratch page (e.g., discard it)
		 */
		if (hibernate_inflate_skip(hiber_info, dest)) {
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE,
			    HIBERNATE_INFLATE_PAGE, 0);
		} else {
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE, dest, 0);
		}

		hibernate_flush();
		end_stream = hibernate_inflate_page();

		dest += PAGE_SIZE;
	} while (!end_stream);
}

/*
 * Deflate from src into the I/O page, up to 'remaining' bytes
 *
 * Returns number of input bytes consumed, and may reset
 * the 'remaining' parameter if not all the output space was consumed
 * (this information is needed to know how much to write to disk).
 */
size_t
hibernate_deflate(union hibernate_info *hiber_info, paddr_t src,
    size_t *remaining)
{
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Set up the stream for deflate */
	hibernate_state->hib_stream.next_in = (caddr_t)src;
	hibernate_state->hib_stream.avail_in = PAGE_SIZE - (src & PAGE_MASK);
	hibernate_state->hib_stream.next_out = (caddr_t)hibernate_io_page +
	    (PAGE_SIZE - *remaining);
	hibernate_state->hib_stream.avail_out = *remaining;

	/* Process next block of data */
	if (deflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH) != Z_OK)
		panic("hibernate zlib deflate error");

	/* Update pointers and return number of bytes consumed */
	*remaining = hibernate_state->hib_stream.avail_out;
	return (PAGE_SIZE - (src & PAGE_MASK)) -
	    hibernate_state->hib_stream.avail_in;
}
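/*
 * Worked example (illustrative): hibernate_deflate() is driven by
 * hibernate_write_chunks() below. If one 4096-byte input page compresses
 * to 1000 bytes, *remaining drops from 4096 to 3096 and nothing is
 * written yet; the I/O page only goes to disk once *remaining reaches 0,
 * so output from several input pages can share one disk write.
 */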
/*
 * Write the hibernation information specified in hiber_info
 * to the location in swap previously calculated (last block of
 * swap), called the "signature block".
 */
int
hibernate_write_signature(union hibernate_info *hiber_info)
{
	/* Write hibernate info to disk */
	return (hiber_info->io_func(hiber_info->device, hiber_info->sig_offset,
	    (vaddr_t)hiber_info, hiber_info->secsize, HIB_W,
	    hiber_info->io_page));
}

/*
 * Write the memory chunk table to the area in swap immediately
 * preceding the signature block. The chunk table is stored
 * in the piglet when this function is called.
 */
int
hibernate_write_chunktable(union hibernate_info *hiber_info)
{
	struct hibernate_disk_chunk *chunks;
	vaddr_t hibernate_chunk_table_start;
	size_t hibernate_chunk_table_size;
	daddr_t chunkbase;
	int i;

	hibernate_chunk_table_size = HIBERNATE_CHUNK_TABLE_SIZE;

	chunkbase = hiber_info->sig_offset -
	    (hibernate_chunk_table_size / hiber_info->secsize);

	hibernate_chunk_table_start = hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE;

	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE);

	/* Write chunk table */
	for (i = 0; i < hibernate_chunk_table_size; i += MAXPHYS) {
		if (hiber_info->io_func(hiber_info->device,
		    chunkbase + (i/hiber_info->secsize),
		    (vaddr_t)(hibernate_chunk_table_start + i),
		    MAXPHYS, HIB_W, hiber_info->io_page))
			return (1);
	}

	return (0);
}

/*
 * Write an empty hiber_info to the swap signature block, which is
 * guaranteed to not match any valid hiber_info.
 */
int
hibernate_clear_signature(void)
{
	union hibernate_info blank_hiber_info;
	union hibernate_info hiber_info;

	/* Zero out a blank hiber_info */
	bzero(&blank_hiber_info, sizeof(blank_hiber_info));

	if (get_hibernate_info(&hiber_info, 0))
		return (1);

	/* Write (zeroed) hibernate info to disk */
	if (hibernate_block_io(&hiber_info,
	    hiber_info.sig_offset - hiber_info.swap_offset,
	    hiber_info.secsize, (vaddr_t)&blank_hiber_info, 1))
		panic("error hibernate write 6");

	return (0);
}

/*
 * Check chunk range overlap when calculating whether or not to copy a
 * compressed chunk to the piglet area before decompressing.
 *
 * Returns zero if the ranges do not overlap, non-zero otherwise.
 */
int
hibernate_check_overlap(paddr_t r1s, paddr_t r1e, paddr_t r2s, paddr_t r2e)
{
	/* case A: end of r1 overlaps start of r2 */
	if (r1s < r2s && r1e > r2s)
		return (1);

	/* case B: r1 entirely inside r2 */
	if (r1s >= r2s && r1e <= r2e)
		return (1);

	/* case C: r2 entirely inside r1 */
	if (r2s >= r1s && r2e <= r1e)
		return (1);

	/* case D: end of r2 overlaps start of r1 */
	if (r2s < r1s && r2e > r1s)
		return (1);

	return (0);
}
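/*
 * Worked example (illustrative): r1 = [0x1000, 0x3000) and
 * r2 = [0x2000, 0x4000) overlap via case A. r1 = [0x1000, 0x2000) and
 * r2 = [0x2000, 0x3000) do not overlap, since an end address is treated
 * as exclusive here.
 */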
/*
 * Compare two hibernate_infos to determine if they are the same (e.g.,
 * whether we should be performing a hibernate resume on this machine).
 * Not all fields are checked - just enough to verify that the machine
 * has the same memory configuration and kernel as the one that
 * wrote the signature previously.
 */
int
hibernate_compare_signature(union hibernate_info *mine,
    union hibernate_info *disk)
{
	u_int i;

	if (mine->nranges != disk->nranges)
		return (1);

	if (strcmp(mine->kernel_version, disk->kernel_version) != 0)
		return (1);

	for (i = 0; i < mine->nranges; i++) {
		if ((mine->ranges[i].base != disk->ranges[i].base) ||
		    (mine->ranges[i].end != disk->ranges[i].end))
			return (1);
	}

	return (0);
}

/*
 * Transfers xfer_size bytes between the hibernate device specified in
 * hib_info at offset blkctr and the vaddr specified at dest.
 *
 * Separate offsets and pages are used to handle misaligned reads (reads
 * that span a page boundary).
 *
 * blkctr specifies a relative offset (relative to the start of swap),
 * not an absolute disk offset.
 */
int
hibernate_block_io(union hibernate_info *hib_info, daddr_t blkctr,
    size_t xfer_size, vaddr_t dest, int iswrite)
{
	struct buf *bp;
	struct bdevsw *bdsw;
	int error;

	bp = geteblk(xfer_size);
	bdsw = &bdevsw[major(hib_info->device)];

	error = (*bdsw->d_open)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_block_io open failed\n");
		return (1);
	}

	if (iswrite)
		bcopy((caddr_t)dest, bp->b_data, xfer_size);

	bp->b_bcount = xfer_size;
	bp->b_blkno = blkctr;
	CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
	SET(bp->b_flags, B_BUSY | (iswrite ? B_WRITE : B_READ) | B_RAW);
	bp->b_dev = hib_info->device;
	bp->b_cylinder = 0;
	(*bdsw->d_strategy)(bp);

	error = biowait(bp);
	if (error) {
		printf("hibernate_block_io biowait failed %d\n", error);
		error = (*bdsw->d_close)(hib_info->device, 0, S_IFCHR,
		    curproc);
		if (error)
			printf("hibernate_block_io error close failed\n");
		return (1);
	}

	error = (*bdsw->d_close)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_block_io close failed\n");
		return (1);
	}

	if (!iswrite)
		bcopy(bp->b_data, (caddr_t)dest, xfer_size);

	bp->b_flags |= B_INVAL;
	brelse(bp);

	return (0);
}
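/*
 * Example (illustrative): reading the signature block through
 * hibernate_block_io(), exactly as hibernate_resume() does below. Note
 * that blkctr is relative to the start of swap, hence the swap_offset
 * subtraction:
 *
 *	if (hibernate_block_io(&hiber_info,
 *	    hiber_info.sig_offset - hiber_info.swap_offset,
 *	    hiber_info.secsize, (vaddr_t)&disk_hiber_info, 0))
 *		panic("error in hibernate read");
 */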
/*
 * Reads the signature block from swap, checks against the current machine's
 * information. If the information matches, perform a resume by reading the
 * saved image into the pig area, and unpacking.
 */
void
hibernate_resume(void)
{
	union hibernate_info hiber_info;
	int s;

	/* Get current running machine's hibernate info */
	bzero(&hiber_info, sizeof(hiber_info));
	if (get_hibernate_info(&hiber_info, 0))
		return;

	/* Read hibernate info from disk */
	s = splbio();

	if (hibernate_block_io(&hiber_info,
	    hiber_info.sig_offset - hiber_info.swap_offset,
	    hiber_info.secsize, (vaddr_t)&disk_hiber_info, 0))
		panic("error in hibernate read");

	/*
	 * If on-disk and in-memory hibernate signatures match,
	 * this means we should do a resume from hibernate.
	 */
	if (hibernate_compare_signature(&hiber_info, &disk_hiber_info)) {
		splx(s);
		return;
	}

	printf("Unhibernating...\n");

	/* Read the image from disk into the image (pig) area */
	if (hibernate_read_image(&disk_hiber_info))
		goto fail;

	if (config_suspend(TAILQ_FIRST(&alldevs), DVACT_QUIESCE) != 0)
		goto fail;

	(void) splhigh();
	disable_intr();
	cold = 1;

	if (config_suspend(TAILQ_FIRST(&alldevs), DVACT_SUSPEND) != 0) {
		cold = 0;
		enable_intr();
		goto fail;
	}

	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
	    VM_PROT_ALL);
	pmap_activate(curproc);

	/* Switch stacks */
	hibernate_switch_stack_machdep();

	/*
	 * Point of no return. Once we pass this point, only kernel code can
	 * be accessed. No global variables or other kernel data structures
	 * are guaranteed to be coherent after unpack starts.
	 *
	 * The image is now in high memory (pig area), we unpack from the pig
	 * to the correct location in memory. We'll eventually end up copying
	 * on top of ourselves, but we are assured the kernel code here is the
	 * same between the hibernated and resuming kernel, and we are running
	 * on our own stack, so the overwrite is ok.
	 */
	hibernate_unpack_image(&disk_hiber_info);

	/*
	 * Resume the loaded kernel by jumping to the MD resume vector.
	 * We won't be returning from this call.
	 */
	hibernate_resume_machdep();

fail:
	splx(s);
	printf("Unable to resume hibernated image\n");
}

/*
 * Unpack image from pig area to original location by looping through the
 * list of output chunks in the order they should be restored (fchunks).
 */
void
hibernate_unpack_image(union hibernate_info *hiber_info)
{
	struct hibernate_disk_chunk *chunks;
	union hibernate_info local_hiber_info;
	paddr_t image_cur = global_pig_start;
	short i, *fchunks;
	char *pva = (char *)hiber_info->piglet_va;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Mask off based on arch-specific piglet page size */
	pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));
	fchunks = (short *)(pva + (4 * PAGE_SIZE));

	chunks = (struct hibernate_disk_chunk *)(pva + HIBERNATE_CHUNK_SIZE);

	/* Can't use hiber_info that's passed in after this point */
	bcopy(hiber_info, &local_hiber_info, sizeof(union hibernate_info));

	hibernate_activate_resume_pt_machdep();

	for (i = 0; i < local_hiber_info.chunk_ctr; i++) {
		/* Reset zlib for inflate */
		if (hibernate_zlib_reset(&local_hiber_info, 0) != Z_OK)
			panic("hibernate failed to reset zlib for inflate");

		hibernate_process_chunk(&local_hiber_info, &chunks[fchunks[i]],
		    image_cur);

		image_cur += chunks[fchunks[i]].compressed_size;
	}
}

/*
 * Bounce a compressed image chunk to the piglet, entering mappings for the
 * copied pages as needed
 */
void
hibernate_copy_chunk_to_piglet(paddr_t img_cur, vaddr_t piglet, size_t size)
{
	size_t ct, ofs;
	paddr_t src = img_cur;
	vaddr_t dest = piglet;

	/* Copy first partial page */
	ct = (PAGE_SIZE) - (src & PAGE_MASK);
	ofs = (src & PAGE_MASK);

	if (ct < PAGE_SIZE) {
		hibernate_enter_resume_mapping(HIBERNATE_INFLATE_PAGE,
		    (src - ofs), 0);
		hibernate_flush();
		bcopy((caddr_t)(HIBERNATE_INFLATE_PAGE + ofs), (caddr_t)dest, ct);
		src += ct;
		dest += ct;
	}

	/* Copy remaining pages */
	while (src < size + img_cur) {
		hibernate_enter_resume_mapping(HIBERNATE_INFLATE_PAGE, src, 0);
		hibernate_flush();
		ct = PAGE_SIZE;
		bcopy((caddr_t)(HIBERNATE_INFLATE_PAGE), (caddr_t)dest, ct);
		hibernate_flush();
		src += ct;
		dest += ct;
	}
}

/*
 * Process a chunk by bouncing it to the piglet, followed by unpacking
 */
void
hibernate_process_chunk(union hibernate_info *hiber_info,
    struct hibernate_disk_chunk *chunk, paddr_t img_cur)
{
	char *pva = (char *)hiber_info->piglet_va;

	hibernate_copy_chunk_to_piglet(img_cur,
	    (vaddr_t)(pva + (HIBERNATE_CHUNK_SIZE * 2)), chunk->compressed_size);

	hibernate_inflate_region(hiber_info, chunk->base,
	    (vaddr_t)(pva + (HIBERNATE_CHUNK_SIZE * 2)),
	    chunk->compressed_size);
}
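/*
 * Worked example (illustrative): if a compressed chunk starts at
 * physical address 0x1000678, the bounce above first maps the page at
 * 0x1000000 and copies the trailing PAGE_SIZE - 0x678 bytes, then
 * continues a full page at a time until compressed_size bytes have
 * landed in the piglet bounce area.
 */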
/*
 * Write a compressed version of this machine's memory to disk, at the
 * precalculated swap offset:
 *
 * end of swap - signature block size - chunk table size - memory size
 *
 * The function begins by looping through each phys mem range, cutting each
 * one into MD sized chunks. These chunks are then compressed individually
 * and written out to disk, in phys mem order. Some chunks might compress
 * more than others, and for this reason, each chunk's size is recorded
 * in the chunk table, which is written to disk after the image has
 * properly been compressed and written (in hibernate_write_chunktable).
 *
 * When this function is called, the machine is nearly suspended - most
 * devices are quiesced/suspended, interrupts are off, and cold has
 * been set. This means that there can be no side effects once the
 * write has started, and the write function itself can also have no
 * side effects. This also means no printfs are permitted (since printf
 * has side effects.)
 */
int
hibernate_write_chunks(union hibernate_info *hiber_info)
{
	paddr_t range_base, range_end, inaddr, temp_inaddr;
	size_t nblocks, out_remaining, used;
	struct hibernate_disk_chunk *chunks;
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
	daddr_t blkctr = hiber_info->image_offset, offset = 0;
	int i;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hiber_info->chunk_ctr = 0;

	/*
	 * Allocate VA for the temp and copy page.
	 *
	 * These will become part of the suspended kernel and will
	 * be freed in hibernate_free, upon resume.
	 */
	hibernate_temp_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_temp_page)
		return (1);

	hibernate_copy_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_copy_page)
		return (1);

	pmap_kenter_pa(hibernate_copy_page,
	    (hiber_info->piglet_pa + 3*PAGE_SIZE), VM_PROT_ALL);

	/* XXX - not needed on all archs */
	pmap_activate(curproc);

	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE);

	/* Calculate the chunk regions */
	for (i = 0; i < hiber_info->nranges; i++) {
		range_base = hiber_info->ranges[i].base;
		range_end = hiber_info->ranges[i].end;

		inaddr = range_base;

		while (inaddr < range_end) {
			chunks[hiber_info->chunk_ctr].base = inaddr;
			if (inaddr + HIBERNATE_CHUNK_SIZE < range_end)
				chunks[hiber_info->chunk_ctr].end = inaddr +
				    HIBERNATE_CHUNK_SIZE;
			else
				chunks[hiber_info->chunk_ctr].end = range_end;

			inaddr += HIBERNATE_CHUNK_SIZE;
			hiber_info->chunk_ctr++;
		}
	}

	/* Compress and write the chunks in the chunktable */
	for (i = 0; i < hiber_info->chunk_ctr; i++) {
		range_base = chunks[i].base;
		range_end = chunks[i].end;

		chunks[i].offset = blkctr;

		/* Reset zlib for deflate */
		if (hibernate_zlib_reset(hiber_info, 1) != Z_OK)
			return (1);

		inaddr = range_base;

		/*
		 * For each range, loop through its phys mem region
		 * and write out the chunks (the last chunk might be
		 * smaller than the chunk size).
		 */
		while (inaddr < range_end) {
			out_remaining = PAGE_SIZE;
			while (out_remaining > 0 && inaddr < range_end) {

				/*
				 * Adjust for regions that are not evenly
				 * divisible by PAGE_SIZE or overflowed
				 * pages from the previous iteration.
				 */
				temp_inaddr = (inaddr & PAGE_MASK) +
				    hibernate_copy_page;

				/* Deflate from temp_inaddr to IO page */
				if (inaddr != range_end) {
					pmap_kenter_pa(hibernate_temp_page,
					    inaddr & PMAP_PA_MASK, VM_PROT_ALL);

					/* XXX - not needed on all archs */
					pmap_activate(curproc);

					bcopy((caddr_t)hibernate_temp_page,
					    (caddr_t)hibernate_copy_page,
					    PAGE_SIZE);
					inaddr += hibernate_deflate(hiber_info,
					    temp_inaddr, &out_remaining);
				}

				if (out_remaining == 0) {
					/* Filled up the page */
					nblocks =
					    PAGE_SIZE / hiber_info->secsize;

					if (hiber_info->io_func(
					    hiber_info->device,
					    blkctr, (vaddr_t)hibernate_io_page,
					    PAGE_SIZE, HIB_W,
					    hiber_info->io_page))
						return (1);

					blkctr += nblocks;
				}
			}
		}

		if (inaddr != range_end)
			return (1);

		/*
		 * End of range. Round up to next secsize bytes
		 * after finishing compress
		 */
		if (out_remaining == 0)
			out_remaining = PAGE_SIZE;

		/* Finish compress */
		hibernate_state->hib_stream.next_in = (caddr_t)inaddr;
		hibernate_state->hib_stream.avail_in = 0;
		hibernate_state->hib_stream.next_out =
		    (caddr_t)hibernate_io_page + (PAGE_SIZE - out_remaining);
		hibernate_state->hib_stream.avail_out = out_remaining;

		if (deflate(&hibernate_state->hib_stream, Z_FINISH) !=
		    Z_STREAM_END)
			return (1);

		out_remaining = hibernate_state->hib_stream.avail_out;

		used = PAGE_SIZE - out_remaining;
		nblocks = used / hiber_info->secsize;

		/* Round up to next block if needed */
		if (used % hiber_info->secsize != 0)
			nblocks++;

		/* Write final block(s) for this chunk */
		if (hiber_info->io_func(hiber_info->device, blkctr,
		    (vaddr_t)hibernate_io_page, nblocks*hiber_info->secsize,
		    HIB_W, hiber_info->io_page))
			return (1);

		blkctr += nblocks;

		offset = blkctr;
		chunks[i].compressed_size = (offset - chunks[i].offset) *
		    hiber_info->secsize;
	}

	return (0);
}
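/*
 * Worked example (illustrative): with the typical 4MB chunk size, a
 * single 512MB physical memory range is cut into 128 chunks. chunk_ctr
 * then drives both the compression loop above and the chunk table that
 * hibernate_write_chunktable() later writes to disk.
 */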
/*
 * Reset the zlib stream state and allocate a new hiballoc area for either
 * inflate or deflate. This function is called once for each hibernate chunk.
 * Calling hiballoc_init multiple times is acceptable since the memory it is
 * provided is unmanaged memory (stolen). We use the memory provided to us
 * by the piglet allocated via the supplied hiber_info.
 */
int
hibernate_zlib_reset(union hibernate_info *hiber_info, int deflate)
{
	vaddr_t hibernate_zlib_start;
	size_t hibernate_zlib_size;
	char *pva = (char *)hiber_info->piglet_va;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	if (!deflate)
		pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));

	hibernate_zlib_start = (vaddr_t)(pva + (28 * PAGE_SIZE));
	hibernate_zlib_size = 80 * PAGE_SIZE;

	bzero((caddr_t)hibernate_zlib_start, hibernate_zlib_size);
	bzero((caddr_t)hibernate_state, PAGE_SIZE);

	/* Set up stream structure */
	hibernate_state->hib_stream.zalloc = (alloc_func)hibernate_zlib_alloc;
	hibernate_state->hib_stream.zfree = (free_func)hibernate_zlib_free;

	/* Initialize the hiballoc arena for zlib allocs/frees */
	hiballoc_init(&hibernate_state->hiballoc_arena,
	    (caddr_t)hibernate_zlib_start, hibernate_zlib_size);

	if (deflate) {
		return deflateInit(&hibernate_state->hib_stream,
		    Z_BEST_SPEED);
	} else
		return inflateInit(&hibernate_state->hib_stream);
}
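/*
 * Worked example (illustrative): per the piglet layout at the top of
 * this file, the arena handed to zlib spans piglet offsets 28*PAGE_SIZE
 * through 108*PAGE_SIZE, the 80 pages zeroed above (320KB with 4KB
 * pages), so inflate/deflate bookkeeping never touches memory the
 * resuming kernel cares about.
 */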
/*
 * Reads the hibernated memory image from disk, whose location and
 * size are recorded in hiber_info. Begin by reading the persisted
 * chunk table, which records the original chunk placement location
 * and compressed size for each. Next, allocate a pig region of
 * sufficient size to hold the compressed image. Next, read the
 * chunks into the pig area (calling hibernate_read_chunks to do this),
 * and finally, if all of the above succeeds, clear the hibernate signature.
 * The function will then return to hibernate_resume, which will proceed
 * to unpack the pig image to the correct place in memory.
 */
int
hibernate_read_image(union hibernate_info *hiber_info)
{
	size_t compressed_size, disk_size, chunktable_size, pig_sz;
	paddr_t image_start, image_end, pig_start, pig_end;
	struct hibernate_disk_chunk *chunks;
	daddr_t blkctr;
	vaddr_t chunktable = (vaddr_t)NULL;
	paddr_t piglet_chunktable = hiber_info->piglet_pa +
	    HIBERNATE_CHUNK_SIZE;
	int i;

	pmap_activate(curproc);

	/* Calculate total chunk table size in disk blocks */
	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	blkctr = hiber_info->sig_offset - chunktable_size -
	    hiber_info->swap_offset;

	chunktable = (vaddr_t)km_alloc(HIBERNATE_CHUNK_TABLE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);

	if (!chunktable)
		return (1);

	/* Read the chunktable from disk into the piglet chunktable */
	for (i = 0; i < HIBERNATE_CHUNK_TABLE_SIZE;
	    i += PAGE_SIZE, blkctr += PAGE_SIZE/hiber_info->secsize) {
		pmap_kenter_pa(chunktable + i, piglet_chunktable + i,
		    VM_PROT_ALL);
		pmap_update(pmap_kernel());
		hibernate_block_io(hiber_info, blkctr, PAGE_SIZE,
		    chunktable + i, 0);
	}

	blkctr = hiber_info->image_offset;
	compressed_size = 0;

	chunks = (struct hibernate_disk_chunk *)chunktable;

	for (i = 0; i < hiber_info->chunk_ctr; i++)
		compressed_size += chunks[i].compressed_size;

	disk_size = compressed_size;

	/* Allocate the pig area */
	pig_sz = compressed_size + HIBERNATE_CHUNK_SIZE;
	if (uvm_pmr_alloc_pig(&pig_start, pig_sz) == ENOMEM)
		return (1);

	pig_end = pig_start + pig_sz;

	/* Calculate image extents. Pig image must end on a chunk boundary. */
	image_end = pig_end & ~(HIBERNATE_CHUNK_SIZE - 1);
	image_start = image_end - disk_size;

	hibernate_read_chunks(hiber_info, image_start, image_end, disk_size,
	    chunks);

	pmap_kremove(chunktable, PAGE_SIZE);
	pmap_update(pmap_kernel());

	/* Prepare the resume time pmap/page table */
	hibernate_populate_resume_pt(hiber_info, image_start, image_end);

	/* Read complete, clear the signature and return */
	return hibernate_clear_signature();
}
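/*
 * Worked example (illustrative): if the compressed image occupies 200MB,
 * the pig request above is 200MB + HIBERNATE_CHUNK_SIZE, and the image
 * is placed so that it ends exactly on a chunk boundary near the top of
 * the pig, with the slack left below image_start.
 */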
/*
 * Read the hibernated memory chunks from disk (chunk information at this
 * point is stored in the piglet) into the pig area specified by
 * [pig_start .. pig_end]. Order the chunks so that the final chunk is the
 * only chunk with overlap possibilities.
 */
int
hibernate_read_chunks(union hibernate_info *hib_info, paddr_t pig_start,
    paddr_t pig_end, size_t image_compr_size,
    struct hibernate_disk_chunk *chunks)
{
	paddr_t img_index, img_cur, r1s, r1e, r2s, r2e;
	paddr_t copy_start, copy_end, piglet_cur;
	paddr_t piglet_base = hib_info->piglet_pa;
	paddr_t piglet_end = piglet_base + HIBERNATE_CHUNK_SIZE;
	daddr_t blkctr;
	size_t processed, compressed_size, read_size;
	int overlap, found, nchunks, nochunks = 0, nfchunks = 0, npchunks = 0;
	short *ochunks, *pchunks, *fchunks, i, j;
	vaddr_t tempva = (vaddr_t)NULL, hibernate_fchunk_area = (vaddr_t)NULL;

	global_pig_start = pig_start;

	/* XXX - don't need this on all archs */
	pmap_activate(curproc);

	/*
	 * These mappings go into the resuming kernel's page table, and are
	 * used only during image read. They disappear from existence
	 * when the suspended kernel is unpacked on top of us.
	 */
	tempva = (vaddr_t)km_alloc(2*PAGE_SIZE, &kv_any, &kp_none, &kd_nowait);
	if (!tempva)
		return (1);
	hibernate_fchunk_area = (vaddr_t)km_alloc(24*PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_fchunk_area)
		return (1);

	/* Final output chunk ordering VA */
	fchunks = (short *)hibernate_fchunk_area;

	/* Piglet chunk ordering VA */
	pchunks = (short *)(hibernate_fchunk_area + (8*PAGE_SIZE));

	/* Temporary chunk ordering VA */
	ochunks = (short *)(hibernate_fchunk_area + (16*PAGE_SIZE));

	/* Map the chunk ordering region */
	for (i = 0; i < 24; i++) {
		pmap_kenter_pa(hibernate_fchunk_area + (i*PAGE_SIZE),
		    piglet_base + ((4+i)*PAGE_SIZE), VM_PROT_ALL);
		pmap_update(pmap_kernel());
	}

	nchunks = hib_info->chunk_ctr;

	/* Initially start all chunks as unplaced */
	for (i = 0; i < nchunks; i++)
		chunks[i].flags = 0;

	/*
	 * Search the list for chunks that are outside the pig area. These
	 * can be placed first in the final output list.
	 */
	for (i = 0; i < nchunks; i++) {
		if (chunks[i].end <= pig_start || chunks[i].base >= pig_end) {
			ochunks[nochunks] = i;
			fchunks[nfchunks] = i;
			nochunks++;
			nfchunks++;
			chunks[i].flags |= HIBERNATE_CHUNK_USED;
		}
	}

	/*
	 * Walk the ordering, place the chunks in ascending memory order.
	 * Conflicts might arise, these are handled next.
	 */
	do {
		img_index = -1;
		found = 0;
		j = -1;
		for (i = 0; i < nchunks; i++)
			if (chunks[i].base < img_index &&
			    chunks[i].flags == 0) {
				j = i;
				img_index = chunks[i].base;
			}

		if (j != -1) {
			found = 1;
			ochunks[nochunks] = j;
			nochunks++;
			chunks[j].flags |= HIBERNATE_CHUNK_PLACED;
		}
	} while (found);

	img_index = pig_start;

	/*
	 * Identify chunk output conflicts (chunks whose pig load area
	 * corresponds to their original memory placement location)
	 */
	for (i = 0; i < nochunks; i++) {
		overlap = 0;
		r1s = img_index;
		r1e = img_index + chunks[ochunks[i]].compressed_size;
		r2s = chunks[ochunks[i]].base;
		r2e = chunks[ochunks[i]].end;

		overlap = hibernate_check_overlap(r1s, r1e, r2s, r2e);
		if (overlap)
			chunks[ochunks[i]].flags |= HIBERNATE_CHUNK_CONFLICT;
		img_index += chunks[ochunks[i]].compressed_size;
	}

	/*
	 * Prepare the final output chunk list. Calculate an output
	 * inflate strategy for overlapping chunks if needed.
	 */
	img_index = pig_start;
	for (i = 0; i < nochunks; i++) {
		/*
		 * If a conflict is detected, consume enough compressed
		 * output chunks to fill the piglet
		 */
		if (chunks[ochunks[i]].flags & HIBERNATE_CHUNK_CONFLICT) {
			copy_start = piglet_base;
			copy_end = piglet_end;
			piglet_cur = piglet_base;
			npchunks = 0;
			j = i;

			while (copy_start < copy_end && j < nochunks) {
				piglet_cur +=
				    chunks[ochunks[j]].compressed_size;
				pchunks[npchunks] = ochunks[j];
				npchunks++;
				copy_start +=
				    chunks[ochunks[j]].compressed_size;
				img_index += chunks[ochunks[j]].compressed_size;
				i++;
				j++;
			}

			piglet_cur = piglet_base;
			for (j = 0; j < npchunks; j++) {
				piglet_cur +=
				    chunks[pchunks[j]].compressed_size;
				fchunks[nfchunks] = pchunks[j];
				chunks[pchunks[j]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
		} else {
			/*
			 * No conflict, chunk can be added without copying
			 */
			if ((chunks[ochunks[i]].flags &
			    HIBERNATE_CHUNK_USED) == 0) {
				fchunks[nfchunks] = ochunks[i];
				chunks[ochunks[i]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
			img_index += chunks[ochunks[i]].compressed_size;
		}
	}

	img_index = pig_start;
	for (i = 0; i < nfchunks; i++) {
		piglet_cur = piglet_base;
		img_index += chunks[fchunks[i]].compressed_size;
	}

	img_cur = pig_start;

	for (i = 0; i < nfchunks; i++) {
		blkctr = chunks[fchunks[i]].offset - hib_info->swap_offset;
		processed = 0;
		compressed_size = chunks[fchunks[i]].compressed_size;

		while (processed < compressed_size) {
			pmap_kenter_pa(tempva, img_cur, VM_PROT_ALL);
			pmap_kenter_pa(tempva + PAGE_SIZE, img_cur+PAGE_SIZE,
			    VM_PROT_ALL);
			pmap_update(pmap_kernel());

			if (compressed_size - processed >= PAGE_SIZE)
				read_size = PAGE_SIZE;
			else
				read_size = compressed_size - processed;

			hibernate_block_io(hib_info, blkctr, read_size,
			    tempva + (img_cur & PAGE_MASK), 0);

			blkctr += (read_size / hib_info->secsize);

			hibernate_flush();
			pmap_kremove(tempva, PAGE_SIZE);
			pmap_kremove(tempva + PAGE_SIZE, PAGE_SIZE);
			processed += read_size;
			img_cur += read_size;
		}
	}

	pmap_kremove(hibernate_fchunk_area, PAGE_SIZE);
	pmap_kremove((vaddr_t)pchunks, PAGE_SIZE);
	pmap_kremove((vaddr_t)fchunks, PAGE_SIZE);
	pmap_update(pmap_kernel());

	return (0);
}
/*
 * Hibernating a machine comprises the following operations:
 *  1. Calculating this machine's hibernate_info information
 *  2. Allocating a piglet and saving the piglet's physaddr
 *  3. Calculating the memory chunks
 *  4. Writing the compressed chunks to disk
 *  5. Writing the chunk table
 *  6. Writing the signature block (hibernate_info)
 *
 * On most architectures, the function calling hibernate_suspend would
 * then power off the machine using some MD-specific implementation.
 */
int
hibernate_suspend(void)
{
	union hibernate_info hib_info;
	size_t swap_size;

	/*
	 * Calculate memory ranges, swap offsets, etc.
	 * This also allocates a piglet whose physaddr is stored in
	 * hib_info->piglet_pa and vaddr stored in hib_info->piglet_va
	 */
	if (get_hibernate_info(&hib_info, 1))
		return (1);

	swap_size = hib_info.image_size + hib_info.secsize +
	    HIBERNATE_CHUNK_TABLE_SIZE;

	if (uvm_swap_check_range(hib_info.device, swap_size)) {
		printf("insufficient swap space for hibernate\n");
		return (1);
	}

	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
	    VM_PROT_ALL);
	pmap_activate(curproc);

	/* Stash the piglet VA so we can free it in the resuming kernel */
	global_piglet_va = hib_info.piglet_va;

	if (hibernate_write_chunks(&hib_info))
		return (1);

	if (hibernate_write_chunktable(&hib_info))
		return (1);

	if (hibernate_write_signature(&hib_info))
		return (1);

	/* Allow the disk to settle */
	delay(500000);

	/*
	 * Give the device-specific I/O function a notification that we're
	 * done, and that it can clean up or shutdown as needed.
	 */
	hib_info.io_func(hib_info.device, 0, (vaddr_t)NULL, 0,
	    HIB_DONE, hib_info.io_page);

	return (0);
}

/*
 * Free items allocated by hibernate_suspend()
 */
void
hibernate_free(void)
{
	if (global_piglet_va)
		uvm_pmr_free_piglet(global_piglet_va,
		    3*HIBERNATE_CHUNK_SIZE);

	if (hibernate_copy_page)
		pmap_kremove(hibernate_copy_page, PAGE_SIZE);
	if (hibernate_temp_page)
		pmap_kremove(hibernate_temp_page, PAGE_SIZE);

	pmap_update(pmap_kernel());

	if (hibernate_copy_page)
		km_free((void *)hibernate_copy_page, PAGE_SIZE,
		    &kv_any, &kp_none);
	if (hibernate_temp_page)
		km_free((void *)hibernate_temp_page, PAGE_SIZE,
		    &kv_any, &kp_none);

	global_piglet_va = 0;
	hibernate_copy_page = 0;
	hibernate_temp_page = 0;
}