/*	$OpenBSD: subr_hibernate.c,v 1.43 2012/07/15 16:09:14 stsp Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
 * Copyright (c) 2011 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/hibernate.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/tree.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <machine/hibernate.h>

/* Temporary vaddr ranges used during hibernate */
vaddr_t hibernate_temp_page;
vaddr_t hibernate_copy_page;

/* Hibernate info as read from disk during resume */
union hibernate_info disk_hiber_info;
paddr_t global_pig_start;
vaddr_t global_piglet_va;

/*
 * Hib alloc enforced alignment.
 */
#define HIB_ALIGN		8 /* bytes alignment */

/*
 * sizeof builtin operation, but with alignment constraint.
 */
#define HIB_SIZEOF(_type)	roundup(sizeof(_type), HIB_ALIGN)

struct hiballoc_entry {
	size_t hibe_use;
	size_t hibe_space;
	RB_ENTRY(hiballoc_entry) hibe_entry;
};

/*
 * Compare hiballoc entries based on the address they manage.
 *
 * Since the address is fixed, relative to struct hiballoc_entry,
 * we just compare the hiballoc_entry pointers.
 */
static __inline int
hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r)
{
	return l < r ? -1 : (l > r);
}

RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)

/*
 * Given a hiballoc entry, return the address it manages.
 */
static __inline void *
hib_entry_to_addr(struct hiballoc_entry *entry)
{
	caddr_t addr;

	addr = (caddr_t)entry;
	addr += HIB_SIZEOF(struct hiballoc_entry);
	return addr;
}

/*
 * Given an address, find the hiballoc entry that manages it.
 */
static __inline struct hiballoc_entry *
hib_addr_to_entry(void *addr_param)
{
	caddr_t addr;

	addr = (caddr_t)addr_param;
	addr -= HIB_SIZEOF(struct hiballoc_entry);
	return (struct hiballoc_entry *)addr;
}

RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)
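/*
 * Layout note (for clarity): each allocation is immediately preceded by
 * its bookkeeping header, so the entry/address conversions above are
 * plain pointer arithmetic:
 *
 *	[ struct hiballoc_entry ][ user data (hibe_use) ][ free space ]
 *	^ entry                  ^ hib_entry_to_addr(entry)
 */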
/*
 * Allocate memory from the arena.
 *
 * Returns NULL if no memory is available.
 */
void *
hib_alloc(struct hiballoc_arena *arena, size_t alloc_sz)
{
	struct hiballoc_entry *entry, *new_entry;
	size_t find_sz;

	/*
	 * Enforce alignment of HIB_ALIGN bytes.
	 *
	 * Note that, because the entry is put in front of the allocation,
	 * 0-byte allocations are guaranteed a unique address.
	 */
	alloc_sz = roundup(alloc_sz, HIB_ALIGN);

	/*
	 * Find an entry with hibe_space >= find_sz.
	 *
	 * If the root node is not large enough, we switch to tree traversal.
	 * Because all entries are made at the bottom of the free space,
	 * traversal from the end has a slightly better chance of yielding
	 * a sufficiently large space.
	 */
	find_sz = alloc_sz + HIB_SIZEOF(struct hiballoc_entry);
	entry = RB_ROOT(&arena->hib_addrs);
	if (entry != NULL && entry->hibe_space < find_sz) {
		RB_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
			if (entry->hibe_space >= find_sz)
				break;
		}
	}

	/*
	 * Insufficient or too fragmented memory.
	 */
	if (entry == NULL)
		return NULL;

	/*
	 * Create new entry in allocated space.
	 */
	new_entry = (struct hiballoc_entry *)(
	    (caddr_t)hib_entry_to_addr(entry) + entry->hibe_use);
	new_entry->hibe_space = entry->hibe_space - find_sz;
	new_entry->hibe_use = alloc_sz;

	/*
	 * Insert entry.
	 */
	if (RB_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
		panic("hib_alloc: insert failure");
	entry->hibe_space = 0;

	/* Return address managed by entry. */
	return hib_entry_to_addr(new_entry);
}

/*
 * Free a pointer previously allocated from this arena.
 *
 * If addr is NULL, this will be silently accepted.
 */
void
hib_free(struct hiballoc_arena *arena, void *addr)
{
	struct hiballoc_entry *entry, *prev;

	if (addr == NULL)
		return;

	/*
	 * Derive entry from addr and check it is really in this arena.
	 */
	entry = hib_addr_to_entry(addr);
	if (RB_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
		panic("hib_free: freed item %p not in hib arena", addr);

	/*
	 * Give the space in entry to its predecessor.
	 *
	 * If entry has no predecessor, change its used space into free space
	 * instead.
	 */
	prev = RB_PREV(hiballoc_addr, &arena->hib_addrs, entry);
	if (prev != NULL &&
	    (void *)((caddr_t)prev + HIB_SIZEOF(struct hiballoc_entry) +
	    prev->hibe_use + prev->hibe_space) == entry) {
		/* Merge entry. */
		RB_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
		prev->hibe_space += HIB_SIZEOF(struct hiballoc_entry) +
		    entry->hibe_use + entry->hibe_space;
	} else {
		/* Flip used memory to free space. */
		entry->hibe_space += entry->hibe_use;
		entry->hibe_use = 0;
	}
}

/*
 * Initialize hiballoc.
 *
 * The allocator will manage memory at ptr, which is len bytes.
 */
int
hiballoc_init(struct hiballoc_arena *arena, void *p_ptr, size_t p_len)
{
	struct hiballoc_entry *entry;
	caddr_t ptr;
	size_t len;

	RB_INIT(&arena->hib_addrs);

	/*
	 * Hib allocator enforces HIB_ALIGN alignment.
	 * Fixup ptr and len.
	 */
	ptr = (caddr_t)roundup((vaddr_t)p_ptr, HIB_ALIGN);
	len = p_len - ((size_t)ptr - (size_t)p_ptr);
	len &= ~((size_t)HIB_ALIGN - 1);

	/*
	 * Insufficient memory to be able to allocate and also do bookkeeping.
	 */
	if (len <= HIB_SIZEOF(struct hiballoc_entry))
		return ENOMEM;

	/*
	 * Create entry describing space.
	 */
	entry = (struct hiballoc_entry *)ptr;
	entry->hibe_use = 0;
	entry->hibe_space = len - HIB_SIZEOF(struct hiballoc_entry);
	RB_INSERT(hiballoc_addr, &arena->hib_addrs, entry);

	return 0;
}
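/*
 * Usage sketch (illustrative only; the backing region is hypothetical,
 * though the 80 page arena size matches hibernate_zlib_reset below):
 *
 *	struct hiballoc_arena arena;
 *	void *p;
 *
 *	if (hiballoc_init(&arena, (void *)arena_va, 80 * PAGE_SIZE) == 0) {
 *		p = hib_alloc(&arena, 128);
 *		...
 *		hib_free(&arena, p);
 *	}
 */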
/*
 * Zero all free memory.
 */
void
uvm_pmr_zero_everything(void)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pg;
	int i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Zero single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			uvm_pagezero(pg);
			atomic_setbits_int(&pg->pg_flags, PG_ZERO);
			uvmexp.zeropages++;
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Zero multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++) {
				uvm_pagezero(&pg[i]);
				atomic_setbits_int(&pg[i].pg_flags, PG_ZERO);
				uvmexp.zeropages++;
			}
			uvm_pmr_insert(pmr, pg, 0);
		}
	}
	uvm_unlock_fpageq();
}

/*
 * Mark all memory as dirty.
 *
 * Used to inform the system that the clean memory isn't clean for some
 * reason, for example because we just came back from hibernate.
 */
void
uvm_pmr_dirty_everything(void)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pg;
	int i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Dirty single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Dirty multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++)
				atomic_clearbits_int(&pg[i].pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}
	}

	uvmexp.zeropages = 0;
	uvm_unlock_fpageq();
}

/*
 * Allocate the highest address that can hold sz.
 *
 * sz in bytes.
 */
int
uvm_pmr_alloc_pig(paddr_t *addr, psize_t sz)
{
	struct uvm_pmemrange *pmr;
	struct vm_page *pig_pg, *pg;

	/*
	 * Convert sz to pages, since that is what pmemrange uses internally.
	 */
	sz = atop(round_page(sz));

	uvm_lock_fpageq();

	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		RB_FOREACH_REVERSE(pig_pg, uvm_pmr_addr, &pmr->addr) {
			if (pig_pg->fpgsz >= sz) {
				goto found;
			}
		}
	}

	/*
	 * Allocation failure.
	 */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/* Remove page from freelist. */
	uvm_pmr_remove_size(pmr, pig_pg);
	pig_pg->fpgsz -= sz;
	pg = pig_pg + pig_pg->fpgsz;
	if (pig_pg->fpgsz == 0)
		uvm_pmr_remove_addr(pmr, pig_pg);
	else
		uvm_pmr_insert_size(pmr, pig_pg);

	uvmexp.free -= sz;
	*addr = VM_PAGE_TO_PHYS(pg);

	/*
	 * Update pg flags.
	 *
	 * Note that we trash the sz argument now.
	 */
	while (sz > 0) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages--;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;

		/*
		 * Next.
		 */
		pg++;
		sz--;
	}

	/* Return. */
	uvm_unlock_fpageq();
	return 0;
}
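/*
 * Note for the piglet allocator below: align must be a power of two,
 * enforced by the KASSERT, because piglet_addr is rounded up with the
 * usual mask trick. For example, with a hypothetical 4MB alignment, a
 * free range starting at physical 0x00501000 yields
 *
 *	piglet_addr = (0x00501000 + 0x3fffff) & ~0x3fffff = 0x00800000.
 */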
/*
 * Allocate a piglet area.
 *
 * This is as low as possible.
 * Piglets are aligned.
 *
 * sz and align in bytes.
 *
 * The call may sleep while the pagedaemon attempts to free memory.
 * The pagedaemon may decide it is not possible to free enough memory,
 * causing the allocation to fail.
 */
int
uvm_pmr_alloc_piglet(vaddr_t *va, paddr_t *pa, vsize_t sz, paddr_t align)
{
	paddr_t pg_addr, piglet_addr;
	struct uvm_pmemrange *pmr;
	struct vm_page *pig_pg, *pg;
	struct pglist pageq;
	int pdaemon_woken;
	vaddr_t piglet_va;

	KASSERT((align & (align - 1)) == 0);
	pdaemon_woken = 0; /* Didn't wake the pagedaemon. */

	/*
	 * Fixup arguments: align must be at least PAGE_SIZE,
	 * sz will be converted to pagecount, since that is what
	 * pmemrange uses internally.
	 */
	if (align < PAGE_SIZE)
		align = PAGE_SIZE;
	sz = round_page(sz);

	uvm_lock_fpageq();

	TAILQ_FOREACH_REVERSE(pmr, &uvm.pmr_control.use, uvm_pmemrange_use,
	    pmr_use) {
retry:
		/*
		 * Search for a range with enough space.
		 * Use the address tree, to ensure the range is as low as
		 * possible.
		 */
		RB_FOREACH(pig_pg, uvm_pmr_addr, &pmr->addr) {
			pg_addr = VM_PAGE_TO_PHYS(pig_pg);
			piglet_addr = (pg_addr + (align - 1)) & ~(align - 1);

			if (atop(pg_addr) + pig_pg->fpgsz >=
			    atop(piglet_addr) + atop(sz))
				goto found;
		}
	}

	/*
	 * Try to coerce the pagedaemon into freeing memory
	 * for the piglet.
	 *
	 * pdaemon_woken is set to prevent the code from
	 * falling into an endless loop.
	 */
	if (!pdaemon_woken) {
		pdaemon_woken = 1;
		if (uvm_wait_pla(ptoa(pmr->low), ptoa(pmr->high) - 1,
		    sz, UVM_PLA_FAILOK) == 0)
			goto retry;
	}

	/* Return failure. */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/*
	 * Extract piglet from pigpen.
	 */
	TAILQ_INIT(&pageq);
	uvm_pmr_extract_range(pmr, pig_pg,
	    atop(piglet_addr), atop(piglet_addr) + atop(sz), &pageq);

	*pa = piglet_addr;
	uvmexp.free -= atop(sz);

	/*
	 * Update pg flags.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages--;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;
	}

	uvm_unlock_fpageq();

	/*
	 * Now allocate a va for the piglet.
	 */
	piglet_va = *va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, &kd_waitok);
	if (!piglet_va) {
		uvm_pglistfree(&pageq);
		return ENOMEM;
	}

	/*
	 * Map piglet to va.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		pmap_kenter_pa(piglet_va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
		piglet_va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
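/*
 * Usage sketch: get_hibernate_info() below allocates the hibernate
 * piglet exactly this way (three chunks of space, chunk-aligned), and
 * uvm_pmr_free_piglet() is the matching release:
 *
 *	vaddr_t va;
 *	paddr_t pa;
 *
 *	if (uvm_pmr_alloc_piglet(&va, &pa, HIBERNATE_CHUNK_SIZE * 3,
 *	    HIBERNATE_CHUNK_SIZE))
 *		goto fail;
 *	...
 *	uvm_pmr_free_piglet(va, 3 * HIBERNATE_CHUNK_SIZE);
 */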
/*
 * Free a piglet area.
 */
void
uvm_pmr_free_piglet(vaddr_t va, vsize_t sz)
{
	paddr_t pa;
	struct vm_page *pg;

	/*
	 * Fix parameters.
	 */
	sz = round_page(sz);

	/*
	 * Find the first page in piglet.
	 * Since piglets are contiguous, the first pg is all we need.
	 */
	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("uvm_pmr_free_piglet: piglet 0x%lx has no pages", va);
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		panic("uvm_pmr_free_piglet: unmanaged page 0x%lx", pa);

	/*
	 * Unmap.
	 */
	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());

	/*
	 * Free the physical and virtual memory.
	 */
	uvm_pmr_freepages(pg, atop(sz));
	km_free((void *)va, sz, &kv_any, &kp_none);
}

/*
 * Physmem RLE compression support.
 *
 * Given a physical page address, return the number of free pages
 * starting at the address, clamped to the number of pages in
 * HIBERNATE_CHUNK_SIZE. Returns 0 if the page at addr is not free.
 */
int
uvm_page_rle(paddr_t addr)
{
	struct vm_page *pg, *pg_end;
	struct vm_physseg *vmp;
	int pseg_idx, off_idx;

	pseg_idx = vm_physseg_find(atop(addr), &off_idx);
	if (pseg_idx == -1)
		return 0;

	vmp = &vm_physmem[pseg_idx];
	pg = &vmp->pgs[off_idx];
	if (!(pg->pg_flags & PQ_FREE))
		return 0;

	/*
	 * Search for the first non-free page after pg.
	 * Note that the page may not be the first page in a free pmemrange,
	 * therefore pg->fpgsz cannot be used.
	 */
	for (pg_end = pg; pg_end <= vmp->lastpg &&
	    (pg_end->pg_flags & PQ_FREE) == PQ_FREE; pg_end++)
		;
	return min((pg_end - pg), HIBERNATE_CHUNK_SIZE/PAGE_SIZE);
}
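/*
 * Example (hypothetical numbers): with 4KB pages and a 4MB chunk, the
 * clamp above is 1024 pages. If addr points into a run of 37 free
 * pages, uvm_page_rle(addr) returns 37; if the run is 5000 pages long,
 * it returns 1024 and a caller can query again further along.
 */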
/*
 * Fills out the hibernate_info union pointed to by hiber_info
 * with information about this machine (swap signature block
 * offsets, number of memory ranges, kernel in use, etc)
 */
int
get_hibernate_info(union hibernate_info *hiber_info, int suspend)
{
	int chunktable_size;
	struct disklabel dl;
	char err_string[128], *dl_ret;

	/* Determine I/O function to use */
	hiber_info->io_func = get_hibernate_io_function();
	if (hiber_info->io_func == NULL)
		return (1);

	/* Calculate hibernate device */
	hiber_info->device = swdevt[0].sw_dev;

	/* Read disklabel (used to calculate signature and image offsets) */
	dl_ret = disk_readlabel(&dl, hiber_info->device, err_string, 128);

	if (dl_ret) {
		printf("Hibernate error reading disklabel: %s\n", dl_ret);
		return (1);
	}

	hiber_info->secsize = dl.d_secsize;

	/* Make sure the signature can fit in one block */
	KASSERT(sizeof(union hibernate_info)/hiber_info->secsize == 1);

	/* Calculate swap offset from start of disk */
	hiber_info->swap_offset = dl.d_partitions[1].p_offset;

	/* Calculate signature block location */
	hiber_info->sig_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    sizeof(union hibernate_info)/hiber_info->secsize;

	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	/* Stash kernel version information */
	bzero(&hiber_info->kernel_version, 128);
	bcopy(version, &hiber_info->kernel_version,
	    min(strlen(version), sizeof(hiber_info->kernel_version)-1));

	if (suspend) {
		/* Allocate piglet region */
		if (uvm_pmr_alloc_piglet(&hiber_info->piglet_va,
		    &hiber_info->piglet_pa, HIBERNATE_CHUNK_SIZE*3,
		    HIBERNATE_CHUNK_SIZE)) {
			printf("Hibernate failed to allocate the piglet\n");
			return (1);
		}
		hiber_info->io_page = (void *)hiber_info->piglet_va;

		/*
		 * Initialize the hibernate IO function (for drivers which
		 * need that)
		 */
		if (hiber_info->io_func(hiber_info->device, 0,
		    (vaddr_t)NULL, 0, HIB_INIT, hiber_info->io_page))
			goto fail;

	} else {
		/*
		 * Resuming kernels use a regular I/O page since we won't
		 * have access to the suspended kernel's piglet VA at this
		 * point. No need to free this I/O page as it will vanish
		 * as part of the resume.
		 */
		hiber_info->io_page = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
		if (!hiber_info->io_page)
			return (1);
	}

	if (get_hibernate_info_md(hiber_info))
		goto fail;

	/* Calculate memory image location */
	hiber_info->image_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    (hiber_info->image_size / hiber_info->secsize) -
	    sizeof(union hibernate_info)/hiber_info->secsize -
	    chunktable_size;

	return (0);
fail:
	if (suspend)
		uvm_pmr_free_piglet(hiber_info->piglet_va,
		    HIBERNATE_CHUNK_SIZE*3);

	return (1);
}
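/*
 * Resulting swap partition layout, in units of secsize blocks (the
 * KASSERT above guarantees the signature occupies exactly one block):
 *
 *	[ ....... swap ....... ][ image ][ chunk table ][ signature ]
 *	^                       ^                       ^
 *	swap_offset             image_offset            sig_offset
 */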
/*
 * Allocate nitems*size bytes from the hiballoc area presently in use
 */
void *
hibernate_zlib_alloc(void *unused, int nitems, int size)
{
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	return hib_alloc(&hibernate_state->hiballoc_arena, nitems*size);
}

/*
 * Free the memory pointed to by addr in the hiballoc area presently in
 * use
 */
void
hibernate_zlib_free(void *unused, void *addr)
{
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hib_free(&hibernate_state->hiballoc_arena, addr);
}

/*
 * Gets the next RLE value from the image stream
 */
int
hibernate_get_next_rle(void)
{
	int rle, i;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Read RLE code */
	hibernate_state->hib_stream.next_out = (char *)&rle;
	hibernate_state->hib_stream.avail_out = sizeof(rle);

	i = inflate(&hibernate_state->hib_stream, Z_FULL_FLUSH);
	if (i != Z_OK && i != Z_STREAM_END) {
		/*
		 * XXX - this will likely reboot/hang most machines,
		 * but there's not much else we can do here.
		 */
		panic("inflate rle error");
	}

	/* Sanity check what RLE value we got */
	if (rle > HIBERNATE_CHUNK_SIZE/PAGE_SIZE || rle < 0)
		panic("invalid RLE code");

	if (i == Z_STREAM_END)
		rle = -1;

	return rle;
}

/*
 * Inflate next page of data from the image stream
 */
int
hibernate_inflate_page(void)
{
	struct hibernate_zlib_state *hibernate_state;
	int i;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Set up the stream for inflate */
	hibernate_state->hib_stream.next_out = (char *)HIBERNATE_INFLATE_PAGE;
	hibernate_state->hib_stream.avail_out = PAGE_SIZE;

	/* Process next block of data */
	i = inflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH);
	if (i != Z_OK && i != Z_STREAM_END) {
		/*
		 * XXX - this will likely reboot/hang most machines,
		 * but there's not much else we can do here.
		 */
		panic("inflate error");
	}

	/* We should always have extracted a full page ... */
	if (hibernate_state->hib_stream.avail_out != 0)
		panic("incomplete page");

	return (i == Z_STREAM_END);
}

/*
 * Inflate size bytes from src into dest, skipping any pages in
 * [src..dest] that are special (see hibernate_inflate_skip)
 *
 * This function executes while using the resume-time stack
 * and pmap, and therefore cannot use ddb/printf/etc. Doing so
 * will likely hang or reset the machine.
 */
void
hibernate_inflate_region(union hibernate_info *hiber_info, paddr_t dest,
    paddr_t src, size_t size)
{
	int end_stream = 0;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hibernate_state->hib_stream.next_in = (char *)src;
	hibernate_state->hib_stream.avail_in = size;

	do {
		/* Flush cache and TLB */
		hibernate_flush();

		/*
		 * Is this a special page? If yes, redirect the
		 * inflate output to a scratch page (eg, discard it)
		 */
		if (hibernate_inflate_skip(hiber_info, dest)) {
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE,
			    HIBERNATE_INFLATE_PAGE, 0);
		} else {
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE, dest, 0);
		}

		hibernate_flush();
		end_stream = hibernate_inflate_page();

		dest += PAGE_SIZE;
	} while (!end_stream);
}

/*
 * Deflate from src into the I/O page, up to 'remaining' bytes.
 *
 * Returns number of input bytes consumed, and may reset
 * the 'remaining' parameter if not all the output space was consumed
 * (this information is needed to know how much to write to disk).
 */
size_t
hibernate_deflate(union hibernate_info *hiber_info, paddr_t src,
    size_t *remaining)
{
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Set up the stream for deflate */
	hibernate_state->hib_stream.next_in = (caddr_t)src;
	hibernate_state->hib_stream.avail_in = PAGE_SIZE - (src & PAGE_MASK);
	hibernate_state->hib_stream.next_out = (caddr_t)hibernate_io_page +
	    (PAGE_SIZE - *remaining);
	hibernate_state->hib_stream.avail_out = *remaining;

	/* Process next block of data */
	if (deflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH) != Z_OK)
		panic("hibernate zlib deflate error");

	/* Update pointers and return number of bytes consumed */
	*remaining = hibernate_state->hib_stream.avail_out;
	return (PAGE_SIZE - (src & PAGE_MASK)) -
	    hibernate_state->hib_stream.avail_in;
}
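/*
 * Worked example for the return value above (hypothetical numbers):
 * with PAGE_SIZE 4096 and src pointing 1024 bytes into a page,
 * avail_in starts at 3072. If deflate leaves avail_in at 100, the call
 * consumed and returns 3072 - 100 = 2972 input bytes, and *remaining
 * tells the caller how much of the I/O page is still unused.
 */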
/*
 * Write the hibernation information specified in hiber_info
 * to the location in swap previously calculated (last block of
 * swap), called the "signature block".
 */
int
hibernate_write_signature(union hibernate_info *hiber_info)
{
	/* Write hibernate info to disk */
	return (hiber_info->io_func(hiber_info->device, hiber_info->sig_offset,
	    (vaddr_t)hiber_info, hiber_info->secsize, HIB_W,
	    hiber_info->io_page));
}

/*
 * Write the memory chunk table to the area in swap immediately
 * preceding the signature block. The chunk table is stored
 * in the piglet when this function is called.
 */
int
hibernate_write_chunktable(union hibernate_info *hiber_info)
{
	struct hibernate_disk_chunk *chunks;
	vaddr_t hibernate_chunk_table_start;
	size_t hibernate_chunk_table_size;
	daddr_t chunkbase;
	int i;

	hibernate_chunk_table_size = HIBERNATE_CHUNK_TABLE_SIZE;

	chunkbase = hiber_info->sig_offset -
	    (hibernate_chunk_table_size / hiber_info->secsize);

	hibernate_chunk_table_start = hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE;

	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE);

	/* Write chunk table */
	for (i = 0; i < hibernate_chunk_table_size; i += MAXPHYS) {
		if (hiber_info->io_func(hiber_info->device,
		    chunkbase + (i/hiber_info->secsize),
		    (vaddr_t)(hibernate_chunk_table_start + i),
		    MAXPHYS, HIB_W, hiber_info->io_page))
			return (1);
	}

	return (0);
}

/*
 * Write an empty hiber_info to the swap signature block, which is
 * guaranteed to not match any valid hiber_info.
 */
int
hibernate_clear_signature(void)
{
	union hibernate_info blank_hiber_info;
	union hibernate_info hiber_info;

	/* Zero out a blank hiber_info */
	bzero(&blank_hiber_info, sizeof(blank_hiber_info));

	if (get_hibernate_info(&hiber_info, 0))
		return (1);

	/* Write (zeroed) hibernate info to disk */
	if (hibernate_block_io(&hiber_info,
	    hiber_info.sig_offset - hiber_info.swap_offset,
	    hiber_info.secsize, (vaddr_t)&blank_hiber_info, 1))
		panic("error hibernate write 6");

	return (0);
}

/*
 * Check chunk range overlap when calculating whether or not to copy a
 * compressed chunk to the piglet area before decompressing.
 *
 * Returns zero if the ranges do not overlap, non-zero otherwise.
 */
int
hibernate_check_overlap(paddr_t r1s, paddr_t r1e, paddr_t r2s, paddr_t r2e)
{
	/* case A : end of r1 overlaps start of r2 */
	if (r1s < r2s && r1e > r2s)
		return (1);

	/* case B : r1 entirely inside r2 */
	if (r1s >= r2s && r1e <= r2e)
		return (1);

	/* case C : r2 entirely inside r1 */
	if (r2s >= r1s && r2e <= r1e)
		return (1);

	/* case D : end of r2 overlaps start of r1 */
	if (r2s < r1s && r2e > r1s)
		return (1);

	return (0);
}
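/*
 * The four overlap cases, pictorially:
 *
 *	A:  r1 [-----)           B:  r1      [---)
 *	    r2    [-----)            r2  [---------)
 *
 *	C:  r1 [---------)       D:  r1      [-----)
 *	    r2     [---)             r2  [-----)
 */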
/*
 * Compare two hibernate_infos to determine if they are the same (eg,
 * we should be performing a hibernate resume on this machine).
 * Not all fields are checked - just enough to verify that the machine
 * has the same memory configuration and kernel as the one that
 * wrote the signature previously.
 */
int
hibernate_compare_signature(union hibernate_info *mine,
    union hibernate_info *disk)
{
	u_int i;

	if (mine->nranges != disk->nranges)
		return (1);

	if (strcmp(mine->kernel_version, disk->kernel_version) != 0)
		return (1);

	for (i = 0; i < mine->nranges; i++) {
		if ((mine->ranges[i].base != disk->ranges[i].base) ||
		    (mine->ranges[i].end != disk->ranges[i].end))
			return (1);
	}

	return (0);
}

/*
 * Transfers xfer_size bytes between the hibernate device specified in
 * hib_info at offset blkctr and the vaddr specified at dest.
 *
 * Separate offsets and pages are used to handle misaligned reads (reads
 * that span a page boundary).
 *
 * blkctr specifies a relative offset (relative to the start of swap),
 * not an absolute disk offset.
 */
int
hibernate_block_io(union hibernate_info *hib_info, daddr_t blkctr,
    size_t xfer_size, vaddr_t dest, int iswrite)
{
	struct buf *bp;
	struct bdevsw *bdsw;
	int error;

	bp = geteblk(xfer_size);
	bdsw = &bdevsw[major(hib_info->device)];

	error = (*bdsw->d_open)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_block_io open failed\n");
		return (1);
	}

	if (iswrite)
		bcopy((caddr_t)dest, bp->b_data, xfer_size);

	bp->b_bcount = xfer_size;
	bp->b_blkno = blkctr;
	CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
	SET(bp->b_flags, B_BUSY | (iswrite ? B_WRITE : B_READ) | B_RAW);
	bp->b_dev = hib_info->device;
	bp->b_cylinder = 0;
	(*bdsw->d_strategy)(bp);

	error = biowait(bp);
	if (error) {
		printf("hibernate_block_io biowait failed %d\n", error);
		error = (*bdsw->d_close)(hib_info->device, 0, S_IFCHR,
		    curproc);
		if (error)
			printf("hibernate_block_io error close failed\n");
		return (1);
	}

	error = (*bdsw->d_close)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_block_io close failed\n");
		return (1);
	}

	if (!iswrite)
		bcopy(bp->b_data, (caddr_t)dest, xfer_size);

	bp->b_flags |= B_INVAL;
	brelse(bp);

	return (0);
}
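/*
 * Callers therefore subtract swap_offset first; for example, this is
 * how hibernate_resume() below fetches the signature block:
 *
 *	hibernate_block_io(&hiber_info,
 *	    hiber_info.sig_offset - hiber_info.swap_offset,
 *	    hiber_info.secsize, (vaddr_t)&disk_hiber_info, 0);
 */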
/*
 * Reads the signature block from swap, checks against the current machine's
 * information. If the information matches, perform a resume by reading the
 * saved image into the pig area, and unpacking.
 */
void
hibernate_resume(void)
{
	union hibernate_info hiber_info;
	int s;

	/* Get current running machine's hibernate info */
	bzero(&hiber_info, sizeof(hiber_info));
	if (get_hibernate_info(&hiber_info, 0))
		return;

	/* Read hibernate info from disk */
	s = splbio();

	if (hibernate_block_io(&hiber_info,
	    hiber_info.sig_offset - hiber_info.swap_offset,
	    hiber_info.secsize, (vaddr_t)&disk_hiber_info, 0)) {
		printf("error in hibernate read\n");
		goto fail;
	}

	/*
	 * If the on-disk and in-memory hibernate signatures don't match,
	 * this is not a resume; return and continue the normal boot.
	 */
	if (hibernate_compare_signature(&hiber_info, &disk_hiber_info)) {
		splx(s);
		return;
	}

	/* Read the image from disk into the image (pig) area */
	if (hibernate_read_image(&disk_hiber_info))
		goto fail;

	if (config_suspend(TAILQ_FIRST(&alldevs), DVACT_QUIESCE) != 0)
		goto fail;

	(void) splhigh();
	disable_intr();
	cold = 1;

	if (config_suspend(TAILQ_FIRST(&alldevs), DVACT_SUSPEND) != 0) {
		cold = 0;
		enable_intr();
		goto fail;
	}

	/* Point of no return ... */

	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
	    VM_PROT_ALL);
	pmap_activate(curproc);

	/* Switch stacks */
	hibernate_switch_stack_machdep();

	/*
	 * Image is now in high memory (pig area), copy to correct location
	 * in memory. We'll eventually end up copying on top of ourself, but
	 * we are assured the kernel code here is the same between the
	 * hibernated and resuming kernel, and we are running on our own
	 * stack, so the overwrite is ok.
	 */
	hibernate_unpack_image(&disk_hiber_info);

	/*
	 * Resume the loaded kernel by jumping to the MD resume vector.
	 * We won't be returning from this call.
	 */
	hibernate_resume_machdep();

fail:
	splx(s);
	printf("Unable to resume hibernated image\n");
}

/*
 * Unpack image from pig area to original location by looping through the
 * list of output chunks in the order they should be restored (fchunks).
 * This ordering is used to avoid having inflate overwrite a chunk in the
 * middle of processing that chunk. This will, of course, happen during the
 * final output chunk, where we copy the chunk to the piglet area first,
 * before inflating.
 */
void
hibernate_unpack_image(union hibernate_info *hiber_info)
{
	struct hibernate_disk_chunk *chunks;
	union hibernate_info local_hiber_info;
	paddr_t image_cur = global_pig_start;
	int *fchunks, i;
	char *pva = (char *)hiber_info->piglet_va;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	/* Mask off based on arch-specific piglet page size */
	pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));
	fchunks = (int *)(pva + (6 * PAGE_SIZE));

	chunks = (struct hibernate_disk_chunk *)(pva + HIBERNATE_CHUNK_SIZE);

	/* Can't use hiber_info that's passed in after this point */
	bcopy(hiber_info, &local_hiber_info, sizeof(union hibernate_info));

	hibernate_activate_resume_pt_machdep();

	for (i = 0; i < local_hiber_info.chunk_ctr; i++) {
		/* Reset zlib for inflate */
		if (hibernate_zlib_reset(&local_hiber_info, 0) != Z_OK)
			panic("hibernate failed to reset zlib for inflate");

		hibernate_process_chunk(&local_hiber_info, &chunks[fchunks[i]],
		    image_cur);

		image_cur += chunks[fchunks[i]].compressed_size;
	}
}

/*
 * Process a chunk by ensuring its proper placement, followed by unpacking
 */
void
hibernate_process_chunk(union hibernate_info *hiber_info,
    struct hibernate_disk_chunk *chunk, paddr_t img_cur)
{
	char *pva = (char *)hiber_info->piglet_va;

	/*
	 * If there is a conflict, copy the chunk to the piglet area
	 * before unpacking it to its original location.
	 */
	if ((chunk->flags & HIBERNATE_CHUNK_CONFLICT) == 0) {
		hibernate_inflate_region(hiber_info, chunk->base,
		    img_cur, chunk->compressed_size);
	} else {
		bcopy((caddr_t)img_cur,
		    pva + (HIBERNATE_CHUNK_SIZE * 2),
		    chunk->compressed_size);
		hibernate_inflate_region(hiber_info, chunk->base,
		    (vaddr_t)(pva + (HIBERNATE_CHUNK_SIZE * 2)),
		    chunk->compressed_size);
	}
}
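/*
 * Note on the bounce copy above: the piglet is three chunks in size,
 * and the region at piglet_va + 2 * HIBERNATE_CHUNK_SIZE serves as the
 * staging area for conflicting chunks. Inflating from there is
 * presumed safe because the piglet belongs to the set of special pages
 * that hibernate_inflate_skip (MD code) keeps the inflate output away
 * from.
 */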
/*
 * Write a compressed version of this machine's memory to disk, at the
 * precalculated swap offset:
 *
 * end of swap - signature block size - chunk table size - memory size
 *
 * The function begins by looping through each phys mem range, cutting each
 * one into MD sized chunks. These chunks are then compressed individually
 * and written out to disk, in phys mem order. Some chunks might compress
 * more than others, and for this reason, each chunk's size is recorded
 * in the chunk table, which is written to disk after the image has
 * properly been compressed and written (in hibernate_write_chunktable).
 *
 * When this function is called, the machine is nearly suspended - most
 * devices are quiesced/suspended, interrupts are off, and cold has
 * been set. This means that there can be no side effects once the
 * write has started, and the write function itself can also have no
 * side effects. This also means no printfs are permitted (since they
 * have side effects).
 */
int
hibernate_write_chunks(union hibernate_info *hiber_info)
{
	paddr_t range_base, range_end, inaddr, temp_inaddr;
	size_t nblocks, out_remaining, used;
	struct hibernate_disk_chunk *chunks;
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
	daddr_t blkctr = hiber_info->image_offset, offset = 0;
	int i;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	hiber_info->chunk_ctr = 0;

	/*
	 * Allocate VA for the temp and copy page.
	 * These will become part of the suspended kernel and will
	 * be freed in hibernate_free, upon resume.
	 */
	hibernate_temp_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_temp_page)
		return (1);

	hibernate_copy_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_copy_page)
		return (1);

	pmap_kenter_pa(hibernate_copy_page,
	    (hiber_info->piglet_pa + 3*PAGE_SIZE), VM_PROT_ALL);

	/* XXX - not needed on all archs */
	pmap_activate(curproc);

	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE);

	/* Calculate the chunk regions */
	for (i = 0; i < hiber_info->nranges; i++) {
		range_base = hiber_info->ranges[i].base;
		range_end = hiber_info->ranges[i].end;

		inaddr = range_base;

		while (inaddr < range_end) {
			chunks[hiber_info->chunk_ctr].base = inaddr;
			if (inaddr + HIBERNATE_CHUNK_SIZE < range_end)
				chunks[hiber_info->chunk_ctr].end = inaddr +
				    HIBERNATE_CHUNK_SIZE;
			else
				chunks[hiber_info->chunk_ctr].end = range_end;

			inaddr += HIBERNATE_CHUNK_SIZE;
			hiber_info->chunk_ctr++;
		}
	}

	/* Compress and write the chunks in the chunktable */
	for (i = 0; i < hiber_info->chunk_ctr; i++) {
		range_base = chunks[i].base;
		range_end = chunks[i].end;

		chunks[i].offset = blkctr;

		/* Reset zlib for deflate */
		if (hibernate_zlib_reset(hiber_info, 1) != Z_OK)
			return (1);

		inaddr = range_base;

		/*
		 * For each range, loop through its phys mem region
		 * and write out the chunks (the last chunk might be
		 * smaller than the chunk size).
		 */
		while (inaddr < range_end) {
			out_remaining = PAGE_SIZE;
			while (out_remaining > 0 && inaddr < range_end) {
				/*
				 * Adjust for regions that are not evenly
				 * divisible by PAGE_SIZE or overflowed
				 * pages from the previous iteration.
				 */
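				/*
				 * temp_inaddr preserves inaddr's offset
				 * within its page, relocated into the copy
				 * page, so the deflate call below starts
				 * from the correct sub-page position.
				 */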
				temp_inaddr = (inaddr & PAGE_MASK) +
				    hibernate_copy_page;

				/* Deflate from temp_inaddr to IO page */
				if (inaddr != range_end) {
					pmap_kenter_pa(hibernate_temp_page,
					    inaddr & PMAP_PA_MASK, VM_PROT_ALL);

					/* XXX - not needed on all archs */
					pmap_activate(curproc);

					bcopy((caddr_t)hibernate_temp_page,
					    (caddr_t)hibernate_copy_page,
					    PAGE_SIZE);
					inaddr += hibernate_deflate(hiber_info,
					    temp_inaddr, &out_remaining);
				}

				if (out_remaining == 0) {
					/* Filled up the page */
					nblocks = PAGE_SIZE /
					    hiber_info->secsize;

					if (hiber_info->io_func(
					    hiber_info->device, blkctr,
					    (vaddr_t)hibernate_io_page,
					    PAGE_SIZE, HIB_W,
					    hiber_info->io_page))
						return (1);

					blkctr += nblocks;
				}
			}
		}

		if (inaddr != range_end)
			return (1);

		/*
		 * End of range. Round up to next secsize bytes
		 * after finishing compress
		 */
		if (out_remaining == 0)
			out_remaining = PAGE_SIZE;

		/* Finish compress */
		hibernate_state->hib_stream.next_in = (caddr_t)inaddr;
		hibernate_state->hib_stream.avail_in = 0;
		hibernate_state->hib_stream.next_out =
		    (caddr_t)hibernate_io_page + (PAGE_SIZE - out_remaining);
		hibernate_state->hib_stream.avail_out = out_remaining;

		if (deflate(&hibernate_state->hib_stream, Z_FINISH) !=
		    Z_STREAM_END)
			return (1);

		out_remaining = hibernate_state->hib_stream.avail_out;

		used = PAGE_SIZE - out_remaining;
		nblocks = used / hiber_info->secsize;

		/* Round up to next block if needed */
		if (used % hiber_info->secsize != 0)
			nblocks++;

		/* Write final block(s) for this chunk */
		if (hiber_info->io_func(hiber_info->device, blkctr,
		    (vaddr_t)hibernate_io_page, nblocks*hiber_info->secsize,
		    HIB_W, hiber_info->io_page))
			return (1);

		blkctr += nblocks;

		offset = blkctr;
		chunks[i].compressed_size = (offset - chunks[i].offset) *
		    hiber_info->secsize;
	}

	return (0);
}
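/*
 * Piglet layout as used throughout this file (offsets from piglet_va;
 * the piglet itself is 3 * HIBERNATE_CHUNK_SIZE in size):
 *
 *	+0 * PAGE_SIZE			suspend-time I/O page (io_page)
 *	+1 * PAGE_SIZE			deflate output page (hibernate_io_page)
 *	+3 * PAGE_SIZE			backing page for hibernate_copy_page
 *	+4..6 * PAGE_SIZE		chunk ordering arrays (ochunks,
 *					pchunks, fchunks)
 *	+8..87 * PAGE_SIZE		zlib arena (hibernate_zlib_reset)
 *	+1 * HIBERNATE_CHUNK_SIZE	chunk table
 *	+2 * HIBERNATE_CHUNK_SIZE	bounce area for conflicting chunks
 */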
/*
 * Reset the zlib stream state and allocate a new hiballoc area for either
 * inflate or deflate. This function is called once for each hibernate chunk.
 * Calling hiballoc_init multiple times is acceptable since the memory it is
 * provided is unmanaged memory (stolen). We use the memory provided to us
 * by the piglet allocated via the supplied hiber_info.
 */
int
hibernate_zlib_reset(union hibernate_info *hiber_info, int deflate)
{
	vaddr_t hibernate_zlib_start;
	size_t hibernate_zlib_size;
	char *pva = (char *)hiber_info->piglet_va;
	struct hibernate_zlib_state *hibernate_state;

	hibernate_state =
	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;

	if (!deflate)
		pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));

	hibernate_zlib_start = (vaddr_t)(pva + (8 * PAGE_SIZE));
	hibernate_zlib_size = 80 * PAGE_SIZE;

	bzero((caddr_t)hibernate_zlib_start, hibernate_zlib_size);
	bzero((caddr_t)hibernate_state, PAGE_SIZE);

	/* Set up stream structure */
	hibernate_state->hib_stream.zalloc = (alloc_func)hibernate_zlib_alloc;
	hibernate_state->hib_stream.zfree = (free_func)hibernate_zlib_free;

	/* Initialize the hiballoc arena for zlib allocs/frees */
	hiballoc_init(&hibernate_state->hiballoc_arena,
	    (caddr_t)hibernate_zlib_start, hibernate_zlib_size);

	if (deflate)
		return deflateInit(&hibernate_state->hib_stream,
		    Z_BEST_SPEED);
	else
		return inflateInit(&hibernate_state->hib_stream);
}
/*
 * Reads the hibernated memory image from disk, whose location and
 * size are recorded in hiber_info. Begin by reading the persisted
 * chunk table, which records the original chunk placement location
 * and compressed size for each chunk. Next, allocate a pig region of
 * sufficient size to hold the compressed image. Next, read the
 * chunks into the pig area (calling hibernate_read_chunks to do this),
 * and finally, if all of the above succeeds, clear the hibernate signature.
 * The function will then return to hibernate_resume, which will proceed
 * to unpack the pig image to the correct place in memory.
 */
int
hibernate_read_image(union hibernate_info *hiber_info)
{
	size_t compressed_size, disk_size, chunktable_size, pig_sz;
	paddr_t image_start, image_end, pig_start, pig_end;
	struct hibernate_disk_chunk *chunks;
	daddr_t blkctr;
	vaddr_t chunktable = (vaddr_t)NULL;
	paddr_t piglet_chunktable = hiber_info->piglet_pa +
	    HIBERNATE_CHUNK_SIZE;
	int i;

	pmap_activate(curproc);

	/* Calculate total chunk table size in disk blocks */
	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	blkctr = hiber_info->sig_offset - chunktable_size -
	    hiber_info->swap_offset;

	chunktable = (vaddr_t)km_alloc(HIBERNATE_CHUNK_TABLE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);

	if (!chunktable)
		return (1);

	/* Read the chunktable from disk into the piglet chunktable */
	for (i = 0; i < HIBERNATE_CHUNK_TABLE_SIZE;
	    i += PAGE_SIZE, blkctr += PAGE_SIZE/hiber_info->secsize) {
		pmap_kenter_pa(chunktable + i, piglet_chunktable + i,
		    VM_PROT_ALL);
		pmap_update(pmap_kernel());
		hibernate_block_io(hiber_info, blkctr, PAGE_SIZE,
		    chunktable + i, 0);
	}

	blkctr = hiber_info->image_offset;
	compressed_size = 0;

	chunks = (struct hibernate_disk_chunk *)chunktable;

	for (i = 0; i < hiber_info->chunk_ctr; i++)
		compressed_size += chunks[i].compressed_size;

	disk_size = compressed_size;

	/* Allocate the pig area */
	pig_sz = compressed_size + HIBERNATE_CHUNK_SIZE;
	if (uvm_pmr_alloc_pig(&pig_start, pig_sz) == ENOMEM)
		return (1);

	pig_end = pig_start + pig_sz;

	/* Calculate image extents. Pig image must end on a chunk boundary. */
	image_end = pig_end & ~(HIBERNATE_CHUNK_SIZE - 1);
	image_start = image_end - disk_size;

	hibernate_read_chunks(hiber_info, image_start, image_end, disk_size,
	    chunks);

	pmap_kremove(chunktable, PAGE_SIZE);
	pmap_update(pmap_kernel());

	/* Prepare the resume time pmap/page table */
	hibernate_populate_resume_pt(hiber_info, image_start, image_end);

	/* Read complete, clear the signature and return */
	return hibernate_clear_signature();
}
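/*
 * Sizing note: the pig is the compressed image size plus one chunk of
 * slack, so rounding image_end down to a chunk boundary above always
 * leaves room for the whole image. E.g. (hypothetical numbers), with
 * 4MB chunks and a pig ending at physical 0x28133000, the image is
 * placed so that it ends at 0x28000000.
 */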
/*
 * Read the hibernated memory chunks from disk (chunk information at this
 * point is stored in the piglet) into the pig area specified by
 * [pig_start .. pig_end]. Order the chunks so that the final chunk is the
 * only chunk with overlap possibilities.
 */
int
hibernate_read_chunks(union hibernate_info *hib_info, paddr_t pig_start,
    paddr_t pig_end, size_t image_compr_size,
    struct hibernate_disk_chunk *chunks)
{
	paddr_t img_index, img_cur, r1s, r1e, r2s, r2e;
	paddr_t copy_start, copy_end, piglet_cur;
	paddr_t piglet_base = hib_info->piglet_pa;
	paddr_t piglet_end = piglet_base + HIBERNATE_CHUNK_SIZE;
	daddr_t blkctr;
	size_t processed, compressed_size, read_size;
	int i, j, overlap, found, nchunks;
	int nochunks = 0, nfchunks = 0, npchunks = 0;
	int *ochunks, *pchunks, *fchunks;
	vaddr_t tempva = (vaddr_t)NULL, hibernate_fchunk_area = (vaddr_t)NULL;

	global_pig_start = pig_start;

	/* XXX - don't need this on all archs */
	pmap_activate(curproc);

	/*
	 * These mappings go into the resuming kernel's page table, and are
	 * used only during image read. They disappear from existence
	 * when the suspended kernel is unpacked on top of us.
	 */
	tempva = (vaddr_t)km_alloc(2*PAGE_SIZE, &kv_any, &kp_none, &kd_nowait);
	if (!tempva)
		return (1);
	hibernate_fchunk_area = (vaddr_t)km_alloc(3*PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_fchunk_area)
		return (1);

	/* Temporary output chunk ordering VA */
	ochunks = (int *)hibernate_fchunk_area;

	/* Piglet chunk ordering VA */
	pchunks = (int *)(hibernate_fchunk_area + PAGE_SIZE);

	/* Final chunk ordering VA */
	fchunks = (int *)(hibernate_fchunk_area + (2*PAGE_SIZE));

	/* Map the chunk ordering region */
	pmap_kenter_pa(hibernate_fchunk_area,
	    piglet_base + (4*PAGE_SIZE), VM_PROT_ALL);
	pmap_update(pmap_kernel());
	pmap_kenter_pa((vaddr_t)pchunks, piglet_base + (5*PAGE_SIZE),
	    VM_PROT_ALL);
	pmap_update(pmap_kernel());
	pmap_kenter_pa((vaddr_t)fchunks, piglet_base + (6*PAGE_SIZE),
	    VM_PROT_ALL);
	pmap_update(pmap_kernel());

	nchunks = hib_info->chunk_ctr;

	/* Initially start all chunks as unplaced */
	for (i = 0; i < nchunks; i++)
		chunks[i].flags = 0;

	/*
	 * Search the list for chunks that are outside the pig area. These
	 * can be placed first in the final output list.
	 */
	for (i = 0; i < nchunks; i++) {
		if (chunks[i].end <= pig_start || chunks[i].base >= pig_end) {
			ochunks[nochunks] = i;
			fchunks[nfchunks] = i;
			nochunks++;
			nfchunks++;
			chunks[i].flags |= HIBERNATE_CHUNK_USED;
		}
	}

	/*
	 * Walk the ordering, place the chunks in ascending memory order.
	 * Conflicts might arise, these are handled next.
	 */
	do {
		img_index = -1;
		found = 0;
		j = -1;
		for (i = 0; i < nchunks; i++)
			if (chunks[i].base < img_index &&
			    chunks[i].flags == 0) {
				j = i;
				img_index = chunks[i].base;
			}

		if (j != -1) {
			found = 1;
			ochunks[nochunks] = j;
			nochunks++;
			chunks[j].flags |= HIBERNATE_CHUNK_PLACED;
		}
	} while (found);

	img_index = pig_start;

	/*
	 * Identify chunk output conflicts (chunks whose pig load area
	 * corresponds to their original memory placement location)
	 */
	for (i = 0; i < nochunks; i++) {
		overlap = 0;
		r1s = img_index;
		r1e = img_index + chunks[ochunks[i]].compressed_size;
		r2s = chunks[ochunks[i]].base;
		r2e = chunks[ochunks[i]].end;

		overlap = hibernate_check_overlap(r1s, r1e, r2s, r2e);
		if (overlap)
			chunks[ochunks[i]].flags |= HIBERNATE_CHUNK_CONFLICT;
		img_index += chunks[ochunks[i]].compressed_size;
	}
	/*
	 * Prepare the final output chunk list. Calculate an output
	 * inflate strategy for overlapping chunks if needed.
	 */
	img_index = pig_start;
	for (i = 0; i < nochunks; i++) {
		/*
		 * If a conflict is detected, consume enough compressed
		 * output chunks to fill the piglet
		 */
		if (chunks[ochunks[i]].flags & HIBERNATE_CHUNK_CONFLICT) {
			copy_start = piglet_base;
			copy_end = piglet_end;
			piglet_cur = piglet_base;
			npchunks = 0;
			j = i;

			while (copy_start < copy_end && j < nochunks) {
				piglet_cur +=
				    chunks[ochunks[j]].compressed_size;
				pchunks[npchunks] = ochunks[j];
				npchunks++;
				copy_start +=
				    chunks[ochunks[j]].compressed_size;
				img_index +=
				    chunks[ochunks[j]].compressed_size;
				i++;
				j++;
			}

			piglet_cur = piglet_base;
			for (j = 0; j < npchunks; j++) {
				piglet_cur +=
				    chunks[pchunks[j]].compressed_size;
				fchunks[nfchunks] = pchunks[j];
				chunks[pchunks[j]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
		} else {
			/*
			 * No conflict, chunk can be added without copying
			 */
			if ((chunks[ochunks[i]].flags &
			    HIBERNATE_CHUNK_USED) == 0) {
				fchunks[nfchunks] = ochunks[i];
				chunks[ochunks[i]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
			img_index += chunks[ochunks[i]].compressed_size;
		}
	}

	img_cur = pig_start;

	for (i = 0; i < nfchunks; i++) {
		blkctr = chunks[fchunks[i]].offset - hib_info->swap_offset;
		processed = 0;
		compressed_size = chunks[fchunks[i]].compressed_size;

		while (processed < compressed_size) {
			pmap_kenter_pa(tempva, img_cur, VM_PROT_ALL);
			pmap_kenter_pa(tempva + PAGE_SIZE, img_cur+PAGE_SIZE,
			    VM_PROT_ALL);
			pmap_update(pmap_kernel());

			if (compressed_size - processed >= PAGE_SIZE)
				read_size = PAGE_SIZE;
			else
				read_size = compressed_size - processed;

			hibernate_block_io(hib_info, blkctr, read_size,
			    tempva + (img_cur & PAGE_MASK), 0);

			blkctr += (read_size / hib_info->secsize);

			hibernate_flush();
			pmap_kremove(tempva, PAGE_SIZE);
			pmap_kremove(tempva + PAGE_SIZE, PAGE_SIZE);
			processed += read_size;
			img_cur += read_size;
		}
	}

	pmap_kremove(hibernate_fchunk_area, PAGE_SIZE);
	pmap_kremove((vaddr_t)pchunks, PAGE_SIZE);
	pmap_kremove((vaddr_t)fchunks, PAGE_SIZE);
	pmap_update(pmap_kernel());

	return (0);
}
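/*
 * Hypothetical caller sketch for hibernate_suspend() below (the real
 * sequence lives in MD code; the error handling shown is illustrative):
 *
 *	if (hibernate_suspend() == 0) {
 *		... jump to the MD power-off path ...
 *	} else {
 *		hibernate_free();
 *		... resume normal operation ...
 *	}
 */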
/*
 * Hibernating a machine comprises the following operations:
 *  1. Calculating this machine's hibernate_info information
 *  2. Allocating a piglet and saving the piglet's physaddr
 *  3. Calculating the memory chunks
 *  4. Writing the compressed chunks to disk
 *  5. Writing the chunk table
 *  6. Writing the signature block (hibernate_info)
 *
 * On most architectures, the function calling hibernate_suspend would
 * then power off the machine using some MD-specific implementation.
 */
int
hibernate_suspend(void)
{
	union hibernate_info hib_info;
	size_t swap_size;

	/*
	 * Calculate memory ranges, swap offsets, etc.
	 * This also allocates a piglet whose physaddr is stored in
	 * hib_info.piglet_pa and whose vaddr is stored in hib_info.piglet_va
	 */
	if (get_hibernate_info(&hib_info, 1))
		return (1);

	swap_size = hib_info.image_size + hib_info.secsize +
	    HIBERNATE_CHUNK_TABLE_SIZE;

	if (uvm_swap_check_range(hib_info.device, swap_size)) {
		printf("insufficient swap space for hibernate\n");
		return (1);
	}

	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
	    VM_PROT_ALL);
	pmap_activate(curproc);

	/* Stash the piglet VA so we can free it in the resuming kernel */
	global_piglet_va = hib_info.piglet_va;

	if (hibernate_write_chunks(&hib_info))
		return (1);

	if (hibernate_write_chunktable(&hib_info))
		return (1);

	if (hibernate_write_signature(&hib_info))
		return (1);

	delay(500000);
	return (0);
}

/*
 * Free items allocated by hibernate_suspend()
 */
void
hibernate_free(void)
{
	if (global_piglet_va)
		uvm_pmr_free_piglet(global_piglet_va,
		    3*HIBERNATE_CHUNK_SIZE);

	if (hibernate_copy_page)
		pmap_kremove(hibernate_copy_page, PAGE_SIZE);
	if (hibernate_temp_page)
		pmap_kremove(hibernate_temp_page, PAGE_SIZE);

	pmap_update(pmap_kernel());

	if (hibernate_copy_page)
		km_free((void *)hibernate_copy_page, PAGE_SIZE,
		    &kv_any, &kp_none);
	if (hibernate_temp_page)
		km_free((void *)hibernate_temp_page, PAGE_SIZE,
		    &kv_any, &kp_none);

	global_piglet_va = 0;
	hibernate_copy_page = 0;
	hibernate_temp_page = 0;
}