/*	$OpenBSD: subr_hibernate.c,v 1.35 2012/06/20 17:31:55 mlarkin Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
 * Copyright (c) 2011 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/hibernate.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/tree.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <uvm/uvm.h>
#include <machine/hibernate.h>

/* Temporary vaddr ranges used during hibernate */
vaddr_t hibernate_temp_page;
vaddr_t hibernate_copy_page;

/* Hibernate info as read from disk during resume */
union hibernate_info disk_hiber_info;
paddr_t global_pig_start;
vaddr_t global_piglet_va;

/*
 * Hib alloc enforced alignment.
 */
#define HIB_ALIGN		8 /* bytes alignment */

/*
 * sizeof builtin operation, but with alignment constraint.
 */
#define HIB_SIZEOF(_type)	roundup(sizeof(_type), HIB_ALIGN)

struct hiballoc_entry {
	size_t			hibe_use;
	size_t			hibe_space;
	RB_ENTRY(hiballoc_entry) hibe_entry;
};

/*
 * Compare hiballoc entries based on the address they manage.
 *
 * Since the address is fixed, relative to struct hiballoc_entry,
 * we just compare the hiballoc_entry pointers.
 */
static __inline int
hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r)
{
	return l < r ? -1 : (l > r);
}

RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)

/*
 * Given a hiballoc entry, return the address it manages.
 */
static __inline void *
hib_entry_to_addr(struct hiballoc_entry *entry)
{
	caddr_t addr;

	addr = (caddr_t)entry;
	addr += HIB_SIZEOF(struct hiballoc_entry);
	return addr;
}

/*
 * Given an address, find the hiballoc entry that corresponds.
 */
static __inline struct hiballoc_entry*
hib_addr_to_entry(void *addr_param)
{
	caddr_t addr;

	addr = (caddr_t)addr_param;
	addr -= HIB_SIZEOF(struct hiballoc_entry);
	return (struct hiballoc_entry*)addr;
}

RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)

/*
 * Allocate memory from the arena.
 *
 * Returns NULL if no memory is available.
 */
void *
hib_alloc(struct hiballoc_arena *arena, size_t alloc_sz)
{
	struct hiballoc_entry *entry, *new_entry;
	size_t find_sz;

	/*
	 * Enforce alignment of HIB_ALIGN bytes.
	 *
	 * Note that, because the entry is put in front of the allocation,
	 * 0-byte allocations are guaranteed a unique address.
	 */
	alloc_sz = roundup(alloc_sz, HIB_ALIGN);

	/*
	 * Find an entry with hibe_space >= find_sz.
	 *
	 * If the root node is not large enough, we switch to tree traversal.
	 * Because all entries are made at the bottom of the free space,
	 * traversal from the end has a slightly better chance of yielding
	 * a sufficiently large space.
	 */
	find_sz = alloc_sz + HIB_SIZEOF(struct hiballoc_entry);
	entry = RB_ROOT(&arena->hib_addrs);
	if (entry != NULL && entry->hibe_space < find_sz) {
		RB_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
			if (entry->hibe_space >= find_sz)
				break;
		}
	}

	/*
	 * Insufficient or too fragmented memory.
	 */
	if (entry == NULL)
		return NULL;

	/*
	 * Create new entry in allocated space.
	 */
	new_entry = (struct hiballoc_entry*)(
	    (caddr_t)hib_entry_to_addr(entry) + entry->hibe_use);
	new_entry->hibe_space = entry->hibe_space - find_sz;
	new_entry->hibe_use = alloc_sz;

	/*
	 * Insert entry.
	 */
	if (RB_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
		panic("hib_alloc: insert failure");
	entry->hibe_space = 0;

	/* Return address managed by entry. */
	return hib_entry_to_addr(new_entry);
}

/*
 * Free a pointer previously allocated from this arena.
 *
 * If addr is NULL, this will be silently accepted.
 */
void
hib_free(struct hiballoc_arena *arena, void *addr)
{
	struct hiballoc_entry *entry, *prev;

	if (addr == NULL)
		return;

	/*
	 * Derive entry from addr and check it is really in this arena.
	 */
	entry = hib_addr_to_entry(addr);
	if (RB_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
		panic("hib_free: freed item %p not in hib arena", addr);

	/*
	 * Give the space in entry to its predecessor.
	 *
	 * If entry has no predecessor, change its used space into free space
	 * instead.
	 */
	prev = RB_PREV(hiballoc_addr, &arena->hib_addrs, entry);
	if (prev != NULL &&
	    (void *)((caddr_t)prev + HIB_SIZEOF(struct hiballoc_entry) +
	    prev->hibe_use + prev->hibe_space) == entry) {
		/* Merge entry. */
		RB_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
		prev->hibe_space += HIB_SIZEOF(struct hiballoc_entry) +
		    entry->hibe_use + entry->hibe_space;
	} else {
		/* Flip used memory to free space. */
		entry->hibe_space += entry->hibe_use;
		entry->hibe_use = 0;
	}
}

/*
 * Initialize hiballoc.
 *
 * The allocator will manage memory at ptr, which is len bytes.
 */
int
hiballoc_init(struct hiballoc_arena *arena, void *p_ptr, size_t p_len)
{
	struct hiballoc_entry *entry;
	caddr_t ptr;
	size_t len;

	RB_INIT(&arena->hib_addrs);

	/*
	 * Hib allocator enforces HIB_ALIGN alignment.
	 * Fixup ptr and len.
	 */
	ptr = (caddr_t)roundup((vaddr_t)p_ptr, HIB_ALIGN);
	len = p_len - ((size_t)ptr - (size_t)p_ptr);
	len &= ~((size_t)HIB_ALIGN - 1);

	/*
	 * Insufficient memory to be able to allocate and also do bookkeeping.
	 */
	if (len <= HIB_SIZEOF(struct hiballoc_entry))
		return ENOMEM;

	/*
	 * Create entry describing space.
	 */
	entry = (struct hiballoc_entry*)ptr;
	entry->hibe_use = 0;
	entry->hibe_space = len - HIB_SIZEOF(struct hiballoc_entry);
	RB_INSERT(hiballoc_addr, &arena->hib_addrs, entry);

	return 0;
}
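
/*
 * Illustrative use of the arena above (not compiled; the buffer name
 * and sizes are hypothetical). A caller hands hiballoc_init() a raw
 * region, then carves HIB_ALIGN'd allocations out of it:
 */
#if 0
	static char example_region[4096];
	struct hiballoc_arena example_arena;
	void *p;

	if (hiballoc_init(&example_arena, example_region,
	    sizeof(example_region)) == 0) {
		p = hib_alloc(&example_arena, 128);
		if (p != NULL)
			hib_free(&example_arena, p);
	}
#endif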

/*
 * Zero all free memory.
 */
void
uvm_pmr_zero_everything(void)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pg;
	int i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Zero single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			uvm_pagezero(pg);
			atomic_setbits_int(&pg->pg_flags, PG_ZERO);
			uvmexp.zeropages++;
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Zero multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++) {
				uvm_pagezero(&pg[i]);
				atomic_setbits_int(&pg[i].pg_flags, PG_ZERO);
				uvmexp.zeropages++;
			}
			uvm_pmr_insert(pmr, pg, 0);
		}
	}
	uvm_unlock_fpageq();
}

/*
 * Mark all memory as dirty.
 *
 * Used to inform the system that the clean memory isn't clean for some
 * reason, for example because we just came back from hibernate.
 */
void
uvm_pmr_dirty_everything(void)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pg;
	int i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Dirty single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Dirty multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++)
				atomic_clearbits_int(&pg[i].pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}
	}

	uvmexp.zeropages = 0;
	uvm_unlock_fpageq();
}

/*
 * Allocate the highest address that can hold sz.
 *
 * sz in bytes.
 */
int
uvm_pmr_alloc_pig(paddr_t *addr, psize_t sz)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pig_pg, *pg;

	/*
	 * Convert sz to pages, since that is what pmemrange uses internally.
	 */
	sz = atop(round_page(sz));

	uvm_lock_fpageq();

	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		RB_FOREACH_REVERSE(pig_pg, uvm_pmr_addr, &pmr->addr) {
			if (pig_pg->fpgsz >= sz) {
				goto found;
			}
		}
	}

	/*
	 * Allocation failure.
	 */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/* Remove page from freelist. */
	uvm_pmr_remove_size(pmr, pig_pg);
	pig_pg->fpgsz -= sz;
	pg = pig_pg + pig_pg->fpgsz;
	if (pig_pg->fpgsz == 0)
		uvm_pmr_remove_addr(pmr, pig_pg);
	else
		uvm_pmr_insert_size(pmr, pig_pg);

	uvmexp.free -= sz;
	*addr = VM_PAGE_TO_PHYS(pg);

	/*
	 * Update pg flags.
	 *
	 * Note that we trash the sz argument now.
	 */
	while (sz > 0) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages -= sz;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;

		/*
		 * Next.
		 */
		pg++;
		sz--;
	}

	/* Return. */
	uvm_unlock_fpageq();
	return 0;
}
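
/*
 * Sketch of the intended use of uvm_pmr_alloc_pig(), mirroring the
 * call in hibernate_read_image() below; compressed_size here is a
 * placeholder for the total compressed image size:
 */
#if 0
	paddr_t pig_start;
	psize_t pig_sz = compressed_size + HIBERNATE_CHUNK_SIZE;

	if (uvm_pmr_alloc_pig(&pig_start, pig_sz) == ENOMEM)
		return (1);
#endif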

/*
 * Allocate a piglet area.
 *
 * This is as low as possible.
 * Piglets are aligned.
 *
 * sz and align in bytes.
 *
 * The call will sleep for the pagedaemon to attempt to free memory.
 * The pagedaemon may decide it's not possible to free enough memory, causing
 * the allocation to fail.
 */
int
uvm_pmr_alloc_piglet(vaddr_t *va, paddr_t *pa, vsize_t sz, paddr_t align)
{
	paddr_t pg_addr, piglet_addr;
	struct uvm_pmemrange *pmr;
	struct vm_page *pig_pg, *pg;
	struct pglist pageq;
	int pdaemon_woken;
	vaddr_t piglet_va;

	KASSERT((align & (align - 1)) == 0);
	pdaemon_woken = 0; /* Didn't wake the pagedaemon. */

	/*
	 * Fixup arguments: align must be at least PAGE_SIZE,
	 * sz will be converted to pagecount, since that is what
	 * pmemrange uses internally.
	 */
	if (align < PAGE_SIZE)
		align = PAGE_SIZE;
	sz = round_page(sz);

	uvm_lock_fpageq();

	TAILQ_FOREACH_REVERSE(pmr, &uvm.pmr_control.use, uvm_pmemrange_use,
	    pmr_use) {
retry:
		/*
		 * Search for a range with enough space.
		 * Use the address tree, to ensure the range is as low as
		 * possible.
		 */
		RB_FOREACH(pig_pg, uvm_pmr_addr, &pmr->addr) {
			pg_addr = VM_PAGE_TO_PHYS(pig_pg);
			piglet_addr = (pg_addr + (align - 1)) & ~(align - 1);

			if (atop(pg_addr) + pig_pg->fpgsz >=
			    atop(piglet_addr) + atop(sz))
				goto found;
		}
	}

	/*
	 * Try to coerce the pagedaemon into freeing memory
	 * for the piglet.
	 *
	 * pdaemon_woken is set to prevent the code from
	 * falling into an endless loop.
	 */
	if (!pdaemon_woken) {
		pdaemon_woken = 1;
		if (uvm_wait_pla(ptoa(pmr->low), ptoa(pmr->high) - 1,
		    sz, UVM_PLA_FAILOK) == 0)
			goto retry;
	}

	/* Return failure. */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/*
	 * Extract piglet from pigpen.
	 */
	TAILQ_INIT(&pageq);
	uvm_pmr_extract_range(pmr, pig_pg,
	    atop(piglet_addr), atop(piglet_addr) + atop(sz), &pageq);

	*pa = piglet_addr;
	uvmexp.free -= atop(sz);

	/*
	 * Update pg flags.
	 *
	 * Note that we trash the sz argument now.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages--;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;
	}

	uvm_unlock_fpageq();

	/*
	 * Now allocate a va.
	 * Use direct mappings for the pages.
	 */

	piglet_va = *va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, &kd_waitok);
	if (!piglet_va) {
		uvm_pglistfree(&pageq);
		return ENOMEM;
	}

	/*
	 * Map piglet to va.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		pmap_kenter_pa(piglet_va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
		piglet_va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
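
/*
 * Worked example of the piglet alignment math above, assuming a 4MB
 * (0x400000) alignment: a free range starting at pg_addr 0x12345000
 * gives piglet_addr = (0x12345000 + 0x3fffff) & ~0x3fffff
 * = 0x12400000, the first 4MB boundary at or above pg_addr.
 */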

/*
 * Free a piglet area.
 */
void
uvm_pmr_free_piglet(vaddr_t va, vsize_t sz)
{
	paddr_t pa;
	struct vm_page *pg;

	/*
	 * Fix parameters.
	 */
	sz = round_page(sz);

	/*
	 * Find the first page in piglet.
	 * Since piglets are contiguous, the first pg is all we need.
	 */
	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("uvm_pmr_free_piglet: piglet 0x%lx has no pages", va);
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		panic("uvm_pmr_free_piglet: unmanaged page 0x%lx", pa);

	/*
	 * Unmap.
	 */
	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());

	/*
	 * Free the physical and virtual memory.
	 */
	uvm_pmr_freepages(pg, atop(sz));
	km_free((void *)va, sz, &kv_any, &kp_none);
}

/*
 * Physmem RLE compression support.
 *
 * Given a physical page address, it will return the number of pages
 * starting at the address, that are free. Clamps to the number of pages in
 * HIBERNATE_CHUNK_SIZE. Returns 0 if the page at addr is not free.
 */
int
uvm_page_rle(paddr_t addr)
{
	struct vm_page *pg, *pg_end;
	struct vm_physseg *vmp;
	int pseg_idx, off_idx;

	pseg_idx = vm_physseg_find(atop(addr), &off_idx);
	if (pseg_idx == -1)
		return 0;

	vmp = &vm_physmem[pseg_idx];
	pg = &vmp->pgs[off_idx];
	if (!(pg->pg_flags & PQ_FREE))
		return 0;

	/*
	 * Search for the first non-free page after pg.
	 * Note that the page may not be the first page in a free pmemrange,
	 * therefore pg->fpgsz cannot be used.
	 */
	for (pg_end = pg; pg_end <= vmp->lastpg &&
	    (pg_end->pg_flags & PQ_FREE) == PQ_FREE; pg_end++)
		;
	return min((pg_end - pg), HIBERNATE_CHUNK_SIZE/PAGE_SIZE);
}

/*
 * Fills out the hibernate_info union pointed to by hiber_info
 * with information about this machine (swap signature block
 * offsets, number of memory ranges, kernel in use, etc)
 */
int
get_hibernate_info(union hibernate_info *hiber_info, int suspend)
{
	int chunktable_size;
	struct disklabel dl;
	char err_string[128], *dl_ret;

	/* Determine I/O function to use */
	hiber_info->io_func = get_hibernate_io_function();
	if (hiber_info->io_func == NULL)
		return (1);

	/* Calculate hibernate device */
	hiber_info->device = swdevt[0].sw_dev;

	/* Read disklabel (used to calculate signature and image offsets) */
	dl_ret = disk_readlabel(&dl, hiber_info->device, err_string, 128);

	if (dl_ret) {
		printf("Hibernate error reading disklabel: %s\n", dl_ret);
		return (1);
	}

	hiber_info->secsize = dl.d_secsize;

	/* Make sure the signature can fit in one block */
	KASSERT(sizeof(union hibernate_info)/hiber_info->secsize == 1);

	/* Calculate swap offset from start of disk */
	hiber_info->swap_offset = dl.d_partitions[1].p_offset;

	/* Calculate signature block location */
	hiber_info->sig_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    sizeof(union hibernate_info)/hiber_info->secsize;

	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	/* Stash kernel version information */
	bzero(&hiber_info->kernel_version, 128);
	bcopy(version, &hiber_info->kernel_version,
	    min(strlen(version), sizeof(hiber_info->kernel_version)-1));

	if (suspend) {
		/* Allocate piglet region */
		if (uvm_pmr_alloc_piglet(&hiber_info->piglet_va,
		    &hiber_info->piglet_pa, HIBERNATE_CHUNK_SIZE*3,
		    HIBERNATE_CHUNK_SIZE)) {
			printf("Hibernate failed to allocate the piglet\n");
			return (1);
		}
		hiber_info->io_page = (void *)hiber_info->piglet_va;
	} else {
		/*
		 * Resuming kernels use a regular I/O page since we won't
		 * have access to the suspended kernel's piglet VA at this
		 * point. No need to free this I/O page as it will vanish
		 * as part of the resume.
		 */
		hiber_info->io_page = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
		if (!hiber_info->io_page)
			return (1);
	}

	/*
	 * Initialize the hibernate IO function (for drivers which
	 * need that)
	 */
	if (hiber_info->io_func(hiber_info->device, 0,
	    (vaddr_t)NULL, 0, HIB_INIT, hiber_info->io_page))
		goto fail;

	if (get_hibernate_info_md(hiber_info))
		goto fail;

	/* Calculate memory image location */
	hiber_info->image_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    (hiber_info->image_size / hiber_info->secsize) -
	    sizeof(union hibernate_info)/hiber_info->secsize -
	    chunktable_size;

	return (0);
fail:
	if (suspend)
		uvm_pmr_free_piglet(hiber_info->piglet_va,
		    HIBERNATE_CHUNK_SIZE*3);

	return (1);
}

/*
 * Allocate nitems*size bytes from the hiballoc area presently in use
 */
void *
hibernate_zlib_alloc(void *unused, int nitems, int size)
{
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state = (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	return hib_alloc(&hibernate_state->hiballoc_arena, nitems*size);
}

/*
 * Free the memory pointed to by addr in the hiballoc area presently in
 * use
 */
void
hibernate_zlib_free(void *unused, void *addr)
{
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state = (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hib_free(&hibernate_state->hiballoc_arena, addr);
}
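
/*
 * The two callbacks above are handed to zlib in hibernate_zlib_reset()
 * below. A minimal sketch of that wiring (zlib passes the opaque
 * pointer as the first argument, unused here; setting it to NULL is
 * an assumption, as the state page is zeroed before use anyway):
 */
#if 0
	z_stream *s = &hibernate_state->hib_stream;

	s->zalloc = (alloc_func)hibernate_zlib_alloc;
	s->zfree = (free_func)hibernate_zlib_free;
	s->opaque = NULL;
#endif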

/*
 * Inflate size bytes from src into dest, skipping any pages in
 * [src..dest] that are special (see hibernate_inflate_skip)
 *
 * This function executes while using the resume-time stack
 * and pmap, and therefore cannot use ddb/printf/etc. Doing so
 * will likely hang or reset the machine.
 */
void
hibernate_inflate(union hibernate_info *hiber_info, paddr_t dest,
    paddr_t src, size_t size)
{
	int i, rle;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state = (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hibernate_state->hib_stream.next_in = (char *)src;
	hibernate_state->hib_stream.avail_in = size;

	do {
		/* Flush cache and TLB */
		hibernate_flush();

		do {
			/* Read RLE code */
			hibernate_state->hib_stream.next_out = (char *)&rle;
			hibernate_state->hib_stream.avail_out = sizeof(rle);

			i = inflate(&hibernate_state->hib_stream,
			    Z_FULL_FLUSH);
			if (i != Z_OK && i != Z_STREAM_END) {
				/*
				 * XXX - this will likely reboot/hang most
				 * machines, but there's not much else we
				 * can do here.
				 */
				panic("inflate rle error");
			}

			/* Sanity check what RLE value we got */
			if (rle > HIBERNATE_CHUNK_SIZE/PAGE_SIZE || rle < 0)
				panic("inflate rle error 3");

			if (rle != 0)
				dest += (rle * PAGE_SIZE);
			if (i == Z_STREAM_END)
				goto next_page;

		} while (rle != 0);

		/*
		 * Is this a special page? If yes, redirect the
		 * inflate output to a scratch page (eg, discard it)
		 */
		if (hibernate_inflate_skip(hiber_info, dest)) {
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE,
			    HIBERNATE_INFLATE_PAGE, 0);
		} else {
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE, dest, 0);
		}

		hibernate_flush();

		/* Set up the stream for inflate */
		hibernate_state->hib_stream.next_out =
		    (char *)HIBERNATE_INFLATE_PAGE;
		hibernate_state->hib_stream.avail_out = PAGE_SIZE;

		/* Process next block of data */
		i = inflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH);
		if (i != Z_OK && i != Z_STREAM_END) {
			/*
			 * XXX - this will likely reboot/hang most machines,
			 * but there's not much else we can do here.
			 */
			panic("inflate error");
		}

next_page:
		dest += PAGE_SIZE - hibernate_state->hib_stream.avail_out;
	} while (i != Z_STREAM_END);
}

/*
 * deflate from src into the I/O page, up to 'remaining' bytes
 *
 * Returns number of input bytes consumed, and may reset
 * the 'remaining' parameter if not all the output space was consumed
 * (this information is needed to know how much to write to disk)
 */
size_t
hibernate_deflate(union hibernate_info *hiber_info, paddr_t src,
    size_t *remaining)
{
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state = (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Set up the stream for deflate */
	hibernate_state->hib_stream.next_in = (caddr_t)src;
	hibernate_state->hib_stream.avail_in = PAGE_SIZE - (src & PAGE_MASK);
	hibernate_state->hib_stream.next_out = (caddr_t)hibernate_io_page +
	    (PAGE_SIZE - *remaining);
	hibernate_state->hib_stream.avail_out = *remaining;

	/* Process next block of data */
	if (deflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH) != Z_OK)
		panic("hibernate zlib deflate error");

	/* Update pointers and return number of bytes consumed */
	*remaining = hibernate_state->hib_stream.avail_out;
	return (PAGE_SIZE - (src & PAGE_MASK)) -
	    hibernate_state->hib_stream.avail_in;
}

/*
 * Write the hibernation information specified in hiber_info
 * to the location in swap previously calculated (last block of
 * swap), called the "signature block".
 */
int
hibernate_write_signature(union hibernate_info *hiber_info)
{
	/* Write hibernate info to disk */
	return (hiber_info->io_func(hiber_info->device, hiber_info->sig_offset,
	    (vaddr_t)hiber_info, hiber_info->secsize, HIB_W,
	    hiber_info->io_page));
}
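
/*
 * Note on the I/O function convention used above and below: io_func
 * takes (dev, block offset, source/dest vaddr, size in bytes, one of
 * HIB_INIT/HIB_R/HIB_W, io_page), and its return value is treated as
 * nonzero on failure. Block offsets are expressed in units of the
 * disklabel sector size (secsize), which is why byte counts are
 * divided by secsize throughout this file.
 */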

/*
 * Write the memory chunk table to the area in swap immediately
 * preceding the signature block. The chunk table is stored
 * in the piglet when this function is called.
 */
int
hibernate_write_chunktable(union hibernate_info *hiber_info)
{
	struct hibernate_disk_chunk *chunks;
	vaddr_t hibernate_chunk_table_start;
	size_t hibernate_chunk_table_size;
	daddr_t chunkbase;
	int i;

	hibernate_chunk_table_size = HIBERNATE_CHUNK_TABLE_SIZE;

	chunkbase = hiber_info->sig_offset -
	    (hibernate_chunk_table_size / hiber_info->secsize);

	hibernate_chunk_table_start = hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE;

	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE);

	/* Write chunk table */
	for (i = 0; i < hibernate_chunk_table_size; i += MAXPHYS) {
		if (hiber_info->io_func(hiber_info->device,
		    chunkbase + (i/hiber_info->secsize),
		    (vaddr_t)(hibernate_chunk_table_start + i),
		    MAXPHYS, HIB_W, hiber_info->io_page))
			return (1);
	}

	return (0);
}

/*
 * Write an empty hiber_info to the swap signature block, which is
 * guaranteed to not match any valid hiber_info.
 */
int
hibernate_clear_signature(void)
{
	union hibernate_info blank_hiber_info;
	union hibernate_info hiber_info;

	/* Zero out a blank hiber_info */
	bzero(&blank_hiber_info, sizeof(hiber_info));

	if (get_hibernate_info(&hiber_info, 0))
		return (1);

	/* Write (zeroed) hibernate info to disk */
	/* XXX - use regular kernel write routine for this */
	if (hiber_info.io_func(hiber_info.device, hiber_info.sig_offset,
	    (vaddr_t)&blank_hiber_info, hiber_info.secsize, HIB_W,
	    hiber_info.io_page))
		panic("error hibernate write 6");

	return (0);
}

/*
 * Check chunk range overlap when calculating whether or not to copy a
 * compressed chunk to the piglet area before decompressing.
 *
 * Returns zero if the ranges do not overlap, non-zero otherwise.
 */
int
hibernate_check_overlap(paddr_t r1s, paddr_t r1e, paddr_t r2s, paddr_t r2e)
{
	/* case A : end of r1 overlaps start of r2 */
	if (r1s < r2s && r1e > r2s)
		return (1);

	/* case B : r1 entirely inside r2 */
	if (r1s >= r2s && r1e <= r2e)
		return (1);

	/* case C : r2 entirely inside r1 */
	if (r2s >= r1s && r2e <= r1e)
		return (1);

	/* case D : end of r2 overlaps start of r1 */
	if (r2s < r1s && r2e > r1s)
		return (1);

	return (0);
}
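
/*
 * Worked example for hibernate_check_overlap() above: r1 = [0x1000,
 * 0x3000) and r2 = [0x2000, 0x4000) hit case A (r1s < r2s, r1e > r2s)
 * and return 1. Touching-but-disjoint ranges such as [0x1000, 0x2000)
 * and [0x2000, 0x3000) fall through all four cases and return 0.
 */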

/*
 * Compare two hibernate_infos to determine if they are the same (eg,
 * whether we should be performing a hibernate resume on this machine).
 * Not all fields are checked - just enough to verify that the machine
 * has the same memory configuration and kernel as the one that
 * wrote the signature previously.
 */
int
hibernate_compare_signature(union hibernate_info *mine,
    union hibernate_info *disk)
{
	u_int i;

	if (mine->nranges != disk->nranges)
		return (1);

	if (strcmp(mine->kernel_version, disk->kernel_version) != 0)
		return (1);

	for (i = 0; i < mine->nranges; i++) {
		if ((mine->ranges[i].base != disk->ranges[i].base) ||
		    (mine->ranges[i].end != disk->ranges[i].end))
			return (1);
	}

	return (0);
}

/*
 * Reads read_size bytes from the hibernate device specified in
 * hib_info at offset blkctr. Output is placed into the vaddr specified
 * at dest.
 *
 * Separate offsets and pages are used to handle misaligned reads (reads
 * that span a page boundary).
 *
 * blkctr specifies a relative offset (relative to the start of swap),
 * not an absolute disk offset.
 */
int
hibernate_read_block(union hibernate_info *hib_info, daddr_t blkctr,
    size_t read_size, vaddr_t dest)
{
	struct buf *bp;
	struct bdevsw *bdsw;
	int error;

	bp = geteblk(read_size);
	bdsw = &bdevsw[major(hib_info->device)];

	error = (*bdsw->d_open)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_read_block open failed\n");
		return (1);
	}

	bp->b_bcount = read_size;
	bp->b_blkno = blkctr;
	CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
	SET(bp->b_flags, B_BUSY | B_READ | B_RAW);
	bp->b_dev = hib_info->device;
	bp->b_cylinder = 0;
	(*bdsw->d_strategy)(bp);

	error = biowait(bp);
	if (error) {
		printf("hibernate_read_block biowait failed %d\n", error);
		error = (*bdsw->d_close)(hib_info->device, 0, S_IFCHR,
		    curproc);
		if (error)
			printf("hibernate_read_block error close failed\n");
		return (1);
	}

	error = (*bdsw->d_close)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_read_block close failed\n");
		return (1);
	}

	bcopy(bp->b_data, (caddr_t)dest, read_size);

	bp->b_flags |= B_INVAL;
	brelse(bp);

	return (0);
}

/*
 * Reads the signature block from swap, checks against the current machine's
 * information. If the information matches, perform a resume by reading the
 * saved image into the pig area, and unpacking.
 */
void
hibernate_resume(void)
{
	union hibernate_info hiber_info;
	int s;

	/* Get current running machine's hibernate info */
	bzero(&hiber_info, sizeof(hiber_info));
	if (get_hibernate_info(&hiber_info, 0))
		return;

	/* Read hibernate info from disk */
	s = splbio();

	/* XXX use regular kernel read routine here */
	if (hiber_info.io_func(hiber_info.device, hiber_info.sig_offset,
	    (vaddr_t)&disk_hiber_info, hiber_info.secsize, HIB_R,
	    hiber_info.io_page))
		panic("error in hibernate read");

	/*
	 * If on-disk and in-memory hibernate signatures match,
	 * this means we should do a resume from hibernate.
	 */
	if (hibernate_compare_signature(&hiber_info, &disk_hiber_info))
		return;

	uvm_pmr_zero_everything();

	/* Read the image from disk into the image (pig) area */
	if (hibernate_read_image(&disk_hiber_info))
		goto fail;

	/* Point of no return ... */

	disable_intr();
	cold = 1;

	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
	    VM_PROT_ALL);
	pmap_activate(curproc);

	/* Switch stacks */
	hibernate_switch_stack_machdep();

	/*
	 * Image is now in high memory (pig area), copy to correct location
	 * in memory. We'll eventually end up copying on top of ourself, but
	 * we are assured the kernel code here is the same between the
	 * hibernated and resuming kernel, and we are running on our own
	 * stack, so the overwrite is ok.
	 */
	hibernate_unpack_image(&disk_hiber_info);

	/*
	 * Resume the loaded kernel by jumping to the MD resume vector.
	 * We won't be returning from this call.
	 */
	hibernate_resume_machdep();

fail:
	printf("Unable to resume hibernated image\n");
}

/*
 * Unpack image from pig area to original location by looping through the
 * list of output chunks in the order they should be restored (fchunks).
 * This ordering is used to avoid having inflate overwrite a chunk in the
 * middle of processing that chunk. This will, of course, happen during the
 * final output chunk, where we copy the chunk to the piglet area first,
 * before inflating.
 */
void
hibernate_unpack_image(union hibernate_info *hiber_info)
{
	struct hibernate_disk_chunk *chunks;
	union hibernate_info local_hiber_info;
	paddr_t image_cur = global_pig_start;
	int *fchunks, i;
	char *pva = (char *)hiber_info->piglet_va;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state = (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Mask off based on arch-specific piglet page size */
	pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));
	fchunks = (int *)(pva + (6 * PAGE_SIZE));

	chunks = (struct hibernate_disk_chunk *)(pva + HIBERNATE_CHUNK_SIZE);

	/* Can't use hiber_info that's passed in after here */
	bcopy(hiber_info, &local_hiber_info, sizeof(union hibernate_info));

	hibernate_activate_resume_pt_machdep();

	for (i = 0; i < local_hiber_info.chunk_ctr; i++) {
		/* Reset zlib for inflate */
		if (hibernate_zlib_reset(&local_hiber_info, 0) != Z_OK)
			panic("hibernate failed to reset zlib for inflate");

		/*
		 * If there is a conflict, copy the chunk to the piglet area
		 * before unpacking it to its original location.
		 */
		if ((chunks[fchunks[i]].flags & HIBERNATE_CHUNK_CONFLICT) == 0)
			hibernate_inflate(&local_hiber_info,
			    chunks[fchunks[i]].base, image_cur,
			    chunks[fchunks[i]].compressed_size);
		else {
			bcopy((caddr_t)image_cur,
			    pva + (HIBERNATE_CHUNK_SIZE * 2),
			    chunks[fchunks[i]].compressed_size);
			hibernate_inflate(&local_hiber_info,
			    chunks[fchunks[i]].base,
			    (vaddr_t)(pva + (HIBERNATE_CHUNK_SIZE * 2)),
			    chunks[fchunks[i]].compressed_size);
		}
		image_cur += chunks[fchunks[i]].compressed_size;
	}
}
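
/*
 * Illustration of the fchunks indirection above, with hypothetical
 * values: if fchunks = { 2, 0, 1 }, the loop inflates chunk 2, then
 * chunk 0, then chunk 1, while image_cur walks linearly through the
 * pig copies of those chunks in that same (restore) order.
 */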

/*
 * Write a compressed version of this machine's memory to disk, at the
 * precalculated swap offset:
 *
 * end of swap - signature block size - chunk table size - memory size
 *
 * The function begins by looping through each phys mem range, cutting each
 * one into 4MB chunks. These chunks are then compressed individually
 * and written out to disk, in phys mem order. Some chunks might compress
 * more than others, and for this reason, each chunk's size is recorded
 * in the chunk table, which is written to disk after the image has
 * properly been compressed and written (in hibernate_write_chunktable).
 *
 * When this function is called, the machine is nearly suspended - most
 * devices are quiesced/suspended, interrupts are off, and cold has
 * been set. This means that there can be no side effects once the
 * write has started, and the write function itself can also have no
 * side effects.
 *
 * This function uses the piglet area during this process as follows:
 *
 * offset from piglet base	use
 * -----------------------	--------------------
 * 0				i/o allocation area
 * PAGE_SIZE			i/o write area
 * 2*PAGE_SIZE			temp/scratch page
 * 3*PAGE_SIZE			temp/scratch page
 * 4*PAGE_SIZE			hiballoc arena
 * 5*PAGE_SIZE to 85*PAGE_SIZE	zlib deflate area
 * ...
 * HIBERNATE_CHUNK_SIZE		chunk table temporary area
 *
 * Some transient piglet content is saved as part of deflate,
 * but it is irrelevant during resume as it will be repurposed
 * at that time for other things.
 */
int
hibernate_write_chunks(union hibernate_info *hiber_info)
{
	paddr_t range_base, range_end, inaddr, temp_inaddr;
	size_t nblocks, out_remaining, used;
	struct hibernate_disk_chunk *chunks;
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
	daddr_t blkctr = hiber_info->image_offset, offset = 0;
	int i, rle;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state = (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hiber_info->chunk_ctr = 0;

	/*
	 * Allocate VA for the temp and copy page.
	 * These will become part of the suspended kernel and will
	 * be freed in hibernate_free, upon resume.
	 */
	hibernate_temp_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_temp_page)
		return (1);

	hibernate_copy_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_copy_page)
		return (1);

	pmap_kenter_pa(hibernate_copy_page,
	    (hiber_info->piglet_pa + 3*PAGE_SIZE), VM_PROT_ALL);

	/* XXX - not needed on all archs */
	pmap_activate(curproc);

	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE);

	/* Calculate the chunk regions */
	for (i = 0; i < hiber_info->nranges; i++) {
		range_base = hiber_info->ranges[i].base;
		range_end = hiber_info->ranges[i].end;

		inaddr = range_base;

		while (inaddr < range_end) {
			chunks[hiber_info->chunk_ctr].base = inaddr;
			if (inaddr + HIBERNATE_CHUNK_SIZE < range_end)
				chunks[hiber_info->chunk_ctr].end = inaddr +
				    HIBERNATE_CHUNK_SIZE;
			else
				chunks[hiber_info->chunk_ctr].end = range_end;

			inaddr += HIBERNATE_CHUNK_SIZE;
			hiber_info->chunk_ctr++;
		}
	}

	/* Compress and write the chunks in the chunktable */
	for (i = 0; i < hiber_info->chunk_ctr; i++) {
		range_base = chunks[i].base;
		range_end = chunks[i].end;

		chunks[i].offset = blkctr;

		/* Reset zlib for deflate */
		if (hibernate_zlib_reset(hiber_info, 1) != Z_OK)
			return (1);

		inaddr = range_base;

		/*
		 * For each range, loop through its phys mem region
		 * and write out the chunks (the last chunk might be
		 * smaller than the chunk size).
		 */
		while (inaddr < range_end) {
			out_remaining = PAGE_SIZE;
			while (out_remaining > 0 && inaddr < range_end) {

				/*
				 * Adjust for regions that are not evenly
				 * divisible by PAGE_SIZE or overflowed
				 * pages from the previous iteration.
				 */
				temp_inaddr = (inaddr & PAGE_MASK) +
				    hibernate_copy_page;

				if (hibernate_inflate_skip(hiber_info, inaddr))
					rle = 1;
				else
					rle = uvm_page_rle(inaddr);

				while (rle != 0 && inaddr < range_end) {
					hibernate_state->hib_stream.next_in =
					    (char *)&rle;
					hibernate_state->hib_stream.avail_in =
					    sizeof(rle);
					hibernate_state->hib_stream.next_out =
					    (caddr_t)hibernate_io_page +
					    (PAGE_SIZE - out_remaining);
					hibernate_state->hib_stream.avail_out =
					    out_remaining;

					if (deflate(&hibernate_state->hib_stream,
					    Z_PARTIAL_FLUSH) != Z_OK)
						return (1);

					out_remaining =
					    hibernate_state->hib_stream.avail_out;
					inaddr += (rle * PAGE_SIZE);
					if (inaddr > range_end)
						inaddr = range_end;
					else
						rle = uvm_page_rle(inaddr);
				}

				if (out_remaining == 0) {
					/* Filled up the page */
					nblocks =
					    PAGE_SIZE / hiber_info->secsize;

					if (hiber_info->io_func(
					    hiber_info->device, blkctr,
					    (vaddr_t)hibernate_io_page,
					    PAGE_SIZE, HIB_W,
					    hiber_info->io_page))
						return (1);

					blkctr += nblocks;
					out_remaining = PAGE_SIZE;
				}

				/* Write '0' RLE code */
				if (inaddr < range_end) {
					hibernate_state->hib_stream.next_in =
					    (char *)&rle;
					hibernate_state->hib_stream.avail_in =
					    sizeof(rle);
					hibernate_state->hib_stream.next_out =
					    (caddr_t)hibernate_io_page +
					    (PAGE_SIZE - out_remaining);
					hibernate_state->hib_stream.avail_out =
					    out_remaining;

					if (deflate(&hibernate_state->hib_stream,
					    Z_PARTIAL_FLUSH) != Z_OK)
						return (1);

					out_remaining =
					    hibernate_state->hib_stream.avail_out;
				}

				if (out_remaining == 0) {
					/* Filled up the page */
					nblocks =
					    PAGE_SIZE / hiber_info->secsize;

					if (hiber_info->io_func(
					    hiber_info->device, blkctr,
					    (vaddr_t)hibernate_io_page,
					    PAGE_SIZE, HIB_W,
					    hiber_info->io_page))
						return (1);

					blkctr += nblocks;
					out_remaining = PAGE_SIZE;
				}

				/* Deflate from temp_inaddr to IO page */
				if (inaddr != range_end) {
					pmap_kenter_pa(hibernate_temp_page,
					    inaddr & PMAP_PA_MASK, VM_PROT_ALL);

					/* XXX - not needed on all archs */
					pmap_activate(curproc);

					bcopy((caddr_t)hibernate_temp_page,
					    (caddr_t)hibernate_copy_page,
					    PAGE_SIZE);
					inaddr += hibernate_deflate(hiber_info,
					    temp_inaddr, &out_remaining);
				}
			}

			if (out_remaining == 0) {
				/* Filled up the page */
				nblocks = PAGE_SIZE / hiber_info->secsize;

				if (hiber_info->io_func(hiber_info->device,
				    blkctr, (vaddr_t)hibernate_io_page,
				    PAGE_SIZE, HIB_W, hiber_info->io_page))
					return (1);

				blkctr += nblocks;
			}
		}

		if (inaddr != range_end)
			return (1);

		/*
		 * End of range. Round up to next secsize bytes
		 * after finishing compress
		 */
		if (out_remaining == 0)
			out_remaining = PAGE_SIZE;

		/* Finish compress */
		hibernate_state->hib_stream.next_in = (caddr_t)inaddr;
		hibernate_state->hib_stream.avail_in = 0;
		hibernate_state->hib_stream.next_out =
		    (caddr_t)hibernate_io_page + (PAGE_SIZE - out_remaining);
		hibernate_state->hib_stream.avail_out = out_remaining;

		if (deflate(&hibernate_state->hib_stream, Z_FINISH) !=
		    Z_STREAM_END)
			return (1);

		out_remaining = hibernate_state->hib_stream.avail_out;

		used = PAGE_SIZE - out_remaining;
		nblocks = used / hiber_info->secsize;

		/* Round up to next block if needed */
		if (used % hiber_info->secsize != 0)
			nblocks++;

		/* Write final block(s) for this chunk */
		if (hiber_info->io_func(hiber_info->device, blkctr,
		    (vaddr_t)hibernate_io_page, nblocks*hiber_info->secsize,
		    HIB_W, hiber_info->io_page))
			return (1);

		blkctr += nblocks;

		offset = blkctr;
		chunks[i].compressed_size = (offset - chunks[i].offset) *
		    hiber_info->secsize;
	}

	return (0);
}

/*
 * Reset the zlib stream state and allocate a new hiballoc area for either
 * inflate or deflate. This function is called once for each hibernate chunk.
 * Calling hiballoc_init multiple times is acceptable since the memory it is
 * provided is unmanaged memory (stolen). We use the memory provided to us
 * by the piglet allocated via the supplied hiber_info.
 */
int
hibernate_zlib_reset(union hibernate_info *hiber_info, int deflate)
{
	vaddr_t hibernate_zlib_start;
	size_t hibernate_zlib_size;
	char *pva = (char *)hiber_info->piglet_va;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state = (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	if (!deflate)
		pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));

	hibernate_zlib_start = (vaddr_t)(pva + (8 * PAGE_SIZE));
	hibernate_zlib_size = 80 * PAGE_SIZE;

	bzero((caddr_t)hibernate_zlib_start, hibernate_zlib_size);
	bzero((caddr_t)hibernate_state, PAGE_SIZE);

	/* Set up stream structure */
	hibernate_state->hib_stream.zalloc = (alloc_func)hibernate_zlib_alloc;
	hibernate_state->hib_stream.zfree = (free_func)hibernate_zlib_free;

	/* Initialize the hiballoc arena for zlib allocs/frees */
	hiballoc_init(&hibernate_state->hiballoc_arena,
	    (caddr_t)hibernate_zlib_start, hibernate_zlib_size);

	if (deflate) {
		return deflateInit(&hibernate_state->hib_stream,
		    Z_BEST_SPEED);
	} else
		return inflateInit(&hibernate_state->hib_stream);
}

/*
 * Reads the hibernated memory image from disk, whose location and
 * size are recorded in hiber_info. Begin by reading the persisted
 * chunk table, which records the original chunk placement location
 * and compressed size for each. Next, allocate a pig region of
 * sufficient size to hold the compressed image. Next, read the
 * chunks into the pig area (calling hibernate_read_chunks to do this),
 * and finally, if all of the above succeeds, clear the hibernate signature.
 * The function will then return to hibernate_resume, which will proceed
 * to unpack the pig image to the correct place in memory.
 */
int
hibernate_read_image(union hibernate_info *hiber_info)
{
	size_t compressed_size, disk_size, chunktable_size, pig_sz;
	paddr_t image_start, image_end, pig_start, pig_end;
	struct hibernate_disk_chunk *chunks;
	daddr_t blkctr;
	vaddr_t chunktable = (vaddr_t)NULL;
	paddr_t piglet_chunktable = hiber_info->piglet_pa +
	    HIBERNATE_CHUNK_SIZE;
	int i;

	pmap_activate(curproc);

	/* Calculate total chunk table size in disk blocks */
	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	blkctr = hiber_info->sig_offset - chunktable_size -
	    hiber_info->swap_offset;

	chunktable = (vaddr_t)km_alloc(HIBERNATE_CHUNK_TABLE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);

	if (!chunktable)
		return (1);

	/* Read the chunktable from disk into the piglet chunktable */
	for (i = 0; i < HIBERNATE_CHUNK_TABLE_SIZE;
	    i += PAGE_SIZE, blkctr += PAGE_SIZE/hiber_info->secsize) {
		pmap_kenter_pa(chunktable + i, piglet_chunktable + i,
		    VM_PROT_ALL);
		pmap_update(pmap_kernel());
		hibernate_read_block(hiber_info, blkctr, PAGE_SIZE,
		    chunktable + i);
	}

	blkctr = hiber_info->image_offset;
	compressed_size = 0;

	chunks = (struct hibernate_disk_chunk *)chunktable;

	for (i = 0; i < hiber_info->chunk_ctr; i++)
		compressed_size += chunks[i].compressed_size;

	disk_size = compressed_size;

	/* Allocate the pig area */
	pig_sz = compressed_size + HIBERNATE_CHUNK_SIZE;
	if (uvm_pmr_alloc_pig(&pig_start, pig_sz) == ENOMEM)
		return (1);

	pig_end = pig_start + pig_sz;

	/* Calculate image extents. Pig image must end on a chunk boundary. */
	image_end = pig_end & ~(HIBERNATE_CHUNK_SIZE - 1);
	image_start = pig_start;

	image_start = image_end - disk_size;

	hibernate_read_chunks(hiber_info, image_start, image_end, disk_size,
	    chunks);

	pmap_kremove(chunktable, PAGE_SIZE);
	pmap_update(pmap_kernel());

	/* Prepare the resume time pmap/page table */
	hibernate_populate_resume_pt(hiber_info, image_start, image_end);

	/* Read complete, clear the signature and return */
	return hibernate_clear_signature();
}

/*
 * Read the hibernated memory chunks from disk (chunk information at this
 * point is stored in the piglet) into the pig area specified by
 * [pig_start .. pig_end]. Order the chunks so that the final chunk is the
 * only chunk with overlap possibilities.
 *
 * This function uses the piglet area during this process as follows:
 *
 * offset from piglet base	use
 * -----------------------	--------------------
 * 0				i/o allocation area
 * PAGE_SIZE			i/o write area
 * 2*PAGE_SIZE			temp/scratch page
 * 3*PAGE_SIZE			temp/scratch page
 * 4*PAGE_SIZE to 6*PAGE_SIZE	chunk ordering area
 * 7*PAGE_SIZE			hiballoc arena
 * 8*PAGE_SIZE to 88*PAGE_SIZE	zlib deflate area
 * ...
 * HIBERNATE_CHUNK_SIZE		chunk table temporary area
 */
int
hibernate_read_chunks(union hibernate_info *hib_info, paddr_t pig_start,
    paddr_t pig_end, size_t image_compr_size,
    struct hibernate_disk_chunk *chunks)
{
	paddr_t img_index, img_cur, r1s, r1e, r2s, r2e;
	paddr_t copy_start, copy_end, piglet_cur;
	paddr_t piglet_base = hib_info->piglet_pa;
	paddr_t piglet_end = piglet_base + HIBERNATE_CHUNK_SIZE;
	daddr_t blkctr;
	size_t processed, compressed_size, read_size;
	int i, j, overlap, found, nchunks;
	int nochunks = 0, nfchunks = 0, npchunks = 0;
	int *ochunks, *pchunks, *fchunks;
	vaddr_t tempva = (vaddr_t)NULL, hibernate_fchunk_area = (vaddr_t)NULL;

	global_pig_start = pig_start;

	/* XXX - don't need this on all archs */
	pmap_activate(curproc);

	/*
	 * These mappings go into the resuming kernel's page table, and are
	 * used only during image read. They disappear from existence
	 * when the suspended kernel is unpacked on top of us.
	 */
	tempva = (vaddr_t)km_alloc(2*PAGE_SIZE, &kv_any, &kp_none, &kd_nowait);
	if (!tempva)
		return (1);
	hibernate_fchunk_area = (vaddr_t)km_alloc(3*PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_fchunk_area)
		return (1);

	/* Temporary output chunk ordering VA */
	ochunks = (int *)hibernate_fchunk_area;

	/* Piglet chunk ordering VA */
	pchunks = (int *)(hibernate_fchunk_area + PAGE_SIZE);

	/* Final chunk ordering VA */
	fchunks = (int *)(hibernate_fchunk_area + (2*PAGE_SIZE));

	/* Map the chunk ordering region */
	pmap_kenter_pa(hibernate_fchunk_area,
	    piglet_base + (4*PAGE_SIZE), VM_PROT_ALL);
	pmap_update(pmap_kernel());
	pmap_kenter_pa((vaddr_t)pchunks, piglet_base + (5*PAGE_SIZE),
	    VM_PROT_ALL);
	pmap_update(pmap_kernel());
	pmap_kenter_pa((vaddr_t)fchunks, piglet_base + (6*PAGE_SIZE),
	    VM_PROT_ALL);
	pmap_update(pmap_kernel());

	nchunks = hib_info->chunk_ctr;

	/* Initially start all chunks as unplaced */
	for (i = 0; i < nchunks; i++)
		chunks[i].flags = 0;

	/*
	 * Search the list for chunks that are outside the pig area. These
	 * can be placed first in the final output list.
	 */
	for (i = 0; i < nchunks; i++) {
		if (chunks[i].end <= pig_start || chunks[i].base >= pig_end) {
			ochunks[nochunks] = i;
			fchunks[nfchunks] = i;
			nochunks++;
			nfchunks++;
			chunks[i].flags |= HIBERNATE_CHUNK_USED;
		}
	}
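
	/*
	 * Note for the selection loop below: img_index is a paddr_t, so
	 * the initial assignment of -1 wraps to the maximum physical
	 * address. Any unplaced chunk therefore compares lower on the
	 * first pass, seeding the search for the lowest remaining base
	 * address.
	 */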
	/*
	 * Walk the ordering, place the chunks in ascending memory order.
	 * Conflicts might arise, these are handled next.
	 */
	do {
		img_index = -1;
		found = 0;
		j = -1;
		for (i = 0; i < nchunks; i++)
			if (chunks[i].base < img_index &&
			    chunks[i].flags == 0) {
				j = i;
				img_index = chunks[i].base;
			}

		if (j != -1) {
			found = 1;
			ochunks[nochunks] = (short)j;
			nochunks++;
			chunks[j].flags |= HIBERNATE_CHUNK_PLACED;
		}
	} while (found);

	img_index = pig_start;

	/*
	 * Identify chunk output conflicts (chunks whose pig load area
	 * corresponds to their original memory placement location)
	 */
	for (i = 0; i < nochunks; i++) {
		overlap = 0;
		r1s = img_index;
		r1e = img_index + chunks[ochunks[i]].compressed_size;
		r2s = chunks[ochunks[i]].base;
		r2e = chunks[ochunks[i]].end;

		overlap = hibernate_check_overlap(r1s, r1e, r2s, r2e);
		if (overlap)
			chunks[ochunks[i]].flags |= HIBERNATE_CHUNK_CONFLICT;
		img_index += chunks[ochunks[i]].compressed_size;
	}

	/*
	 * Prepare the final output chunk list. Calculate an output
	 * inflate strategy for overlapping chunks if needed.
	 */
	img_index = pig_start;
	for (i = 0; i < nochunks; i++) {
		/*
		 * If a conflict is detected, consume enough compressed
		 * output chunks to fill the piglet
		 */
		if (chunks[ochunks[i]].flags & HIBERNATE_CHUNK_CONFLICT) {
			copy_start = piglet_base;
			copy_end = piglet_end;
			piglet_cur = piglet_base;
			npchunks = 0;
			j = i;

			while (copy_start < copy_end && j < nochunks) {
				piglet_cur +=
				    chunks[ochunks[j]].compressed_size;
				pchunks[npchunks] = ochunks[j];
				npchunks++;
				copy_start +=
				    chunks[ochunks[j]].compressed_size;
				img_index +=
				    chunks[ochunks[j]].compressed_size;
				i++;
				j++;
			}

			piglet_cur = piglet_base;
			for (j = 0; j < npchunks; j++) {
				piglet_cur +=
				    chunks[pchunks[j]].compressed_size;
				fchunks[nfchunks] = pchunks[j];
				chunks[pchunks[j]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
		} else {
			/*
			 * No conflict, chunk can be added without copying
			 */
			if ((chunks[ochunks[i]].flags &
			    HIBERNATE_CHUNK_USED) == 0) {
				fchunks[nfchunks] = ochunks[i];
				chunks[ochunks[i]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
			img_index += chunks[ochunks[i]].compressed_size;
		}
	}

	img_index = pig_start;
	for (i = 0; i < nfchunks; i++) {
		piglet_cur = piglet_base;
		img_index += chunks[fchunks[i]].compressed_size;
	}

	img_cur = pig_start;

	for (i = 0; i < nfchunks; i++) {
		blkctr = chunks[fchunks[i]].offset - hib_info->swap_offset;
		processed = 0;
		compressed_size = chunks[fchunks[i]].compressed_size;

		while (processed < compressed_size) {
			pmap_kenter_pa(tempva, img_cur, VM_PROT_ALL);
			pmap_kenter_pa(tempva + PAGE_SIZE, img_cur+PAGE_SIZE,
			    VM_PROT_ALL);
			pmap_update(pmap_kernel());

			if (compressed_size - processed >= PAGE_SIZE)
				read_size = PAGE_SIZE;
			else
				read_size = compressed_size - processed;

			hibernate_read_block(hib_info, blkctr, read_size,
			    tempva + (img_cur & PAGE_MASK));

			blkctr += (read_size / hib_info->secsize);

			hibernate_flush();
			pmap_kremove(tempva, PAGE_SIZE);
			pmap_kremove(tempva + PAGE_SIZE, PAGE_SIZE);
			processed += read_size;
			img_cur += read_size;
		}
	}

	pmap_kremove(hibernate_fchunk_area, PAGE_SIZE);
	pmap_kremove((vaddr_t)pchunks, PAGE_SIZE);
	pmap_kremove((vaddr_t)fchunks, PAGE_SIZE);
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Hibernating a machine comprises the following operations:
 *  1. Calculating this machine's hibernate_info information
 *  2. Allocating a piglet and saving the piglet's physaddr
 *  3. Calculating the memory chunks
 *  4. Writing the compressed chunks to disk
 *  5. Writing the chunk table
 *  6. Writing the signature block (hibernate_info)
 *
 * On most architectures, the function calling hibernate_suspend would
 * then power off the machine using some MD-specific implementation.
 */
int
hibernate_suspend(void)
{
	union hibernate_info hib_info;

	/*
	 * Calculate memory ranges, swap offsets, etc.
	 * This also allocates a piglet whose physaddr is stored in
	 * hib_info->piglet_pa and vaddr stored in hib_info->piglet_va
	 */
	if (get_hibernate_info(&hib_info, 1))
		return (1);

	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
	    VM_PROT_ALL);
	pmap_activate(curproc);

	global_piglet_va = hib_info.piglet_va;

	if (hibernate_write_chunks(&hib_info))
		return (1);

	if (hibernate_write_chunktable(&hib_info))
		return (1);

	if (hibernate_write_signature(&hib_info))
		return (1);

	delay(500000);
	return (0);
}

/*
 * Free items allocated during hibernate
 */
void
hibernate_free(void)
{
	uvm_pmr_free_piglet(global_piglet_va, 3*HIBERNATE_CHUNK_SIZE);

	pmap_kremove(hibernate_copy_page, PAGE_SIZE);
	pmap_kremove(hibernate_temp_page, PAGE_SIZE);
	pmap_update(pmap_kernel());

	km_free((void *)hibernate_copy_page, PAGE_SIZE, &kv_any, &kp_none);
	km_free((void *)hibernate_temp_page, PAGE_SIZE, &kv_any, &kp_none);
}