1 /*	$OpenBSD: subr_hibernate.c,v 1.52 2013/03/07 01:26:54 mlarkin Exp $	*/
2 
3 /*
4  * Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
5  * Copyright (c) 2011 Mike Larkin <mlarkin@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/hibernate.h>
21 #include <sys/malloc.h>
22 #include <sys/param.h>
23 #include <sys/tree.h>
24 #include <sys/types.h>
25 #include <sys/systm.h>
26 #include <sys/disklabel.h>
27 #include <sys/disk.h>
28 #include <sys/conf.h>
29 #include <sys/buf.h>
30 #include <sys/fcntl.h>
31 #include <sys/stat.h>
32 #include <uvm/uvm.h>
33 #include <uvm/uvm_swap.h>
34 #include <machine/hibernate.h>
35 
36 /*
37  * Hibernate piglet layout information
38  *
39  * The piglet is a scratch area of memory allocated by the suspending kernel.
40  * Its phys and virt addrs are recorded in the signature block. The piglet is
41  * used to guarantee an unused area of memory that can be used by the resuming
42  * kernel for various things. The piglet is excluded during unpack operations.
43  * The piglet size is presently 3*HIBERNATE_CHUNK_SIZE (typically 3*4MB).
44  *
45  * Offset from piglet_base	Purpose
46  * ----------------------------------------------------------------------------
47  * 0				I/O page used during resume
48  * 1*PAGE_SIZE		 	I/O page used during hibernate suspend
49  * 2*PAGE_SIZE			unused
50  * 3*PAGE_SIZE			copy page used during hibernate suspend
51  * 4*PAGE_SIZE			final chunk ordering list (8 pages)
52  * 12*PAGE_SIZE			piglet chunk ordering list (8 pages)
53  * 20*PAGE_SIZE			temp chunk ordering list (8 pages)
54  * 28*PAGE_SIZE			start of hiballoc area
55  * 108*PAGE_SIZE		end of hiballoc area (80 pages)
56  * ...				unused
57  * HIBERNATE_CHUNK_SIZE		start of hibernate chunk table
58  * 2*HIBERNATE_CHUNK_SIZE	bounce area for chunks being unpacked
59  * 3*HIBERNATE_CHUNK_SIZE	end of piglet
60  */
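
/*
 * For example, the chunk table region above is addressed later in this
 * file as piglet_va + HIBERNATE_CHUNK_SIZE, and the final chunk ordering
 * list as piglet_va + 4 * PAGE_SIZE.
 */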
61 
62 /* Temporary vaddr ranges used during hibernate */
63 vaddr_t hibernate_temp_page;
64 vaddr_t hibernate_copy_page;
65 
66 /* Hibernate info as read from disk during resume */
67 union hibernate_info disk_hiber_info;
68 paddr_t global_pig_start;
69 vaddr_t global_piglet_va;
70 
71 void hibernate_copy_chunk_to_piglet(paddr_t, vaddr_t, size_t);
72 
73 /*
74  * Hib alloc enforced alignment.
75  */
76 #define HIB_ALIGN		8 /* bytes alignment */
77 
78 /*
79  * sizeof builtin operation, but with alignment constraint.
80  */
81 #define HIB_SIZEOF(_type)	roundup(sizeof(_type), HIB_ALIGN)
82 
83 struct hiballoc_entry {
84 	size_t			hibe_use;
85 	size_t			hibe_space;
86 	RB_ENTRY(hiballoc_entry) hibe_entry;
87 };
88 
89 /*
90  * Compare hiballoc entries based on the address they manage.
91  *
92  * Since the managed address is at a fixed offset from its struct
93  * hiballoc_entry, we just compare the hiballoc_entry pointers.
94  */
95 static __inline int
96 hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r)
97 {
98 	return l < r ? -1 : (l > r);
99 }
100 
101 RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)
102 
103 /*
104  * Given a hiballoc entry, return the address it manages.
105  */
106 static __inline void *
107 hib_entry_to_addr(struct hiballoc_entry *entry)
108 {
109 	caddr_t addr;
110 
111 	addr = (caddr_t)entry;
112 	addr += HIB_SIZEOF(struct hiballoc_entry);
113 	return addr;
114 }
115 
116 /*
117  * Given an address, find the hiballoc entry that corresponds to it.
118  */
119 static __inline struct hiballoc_entry*
120 hib_addr_to_entry(void *addr_param)
121 {
122 	caddr_t addr;
123 
124 	addr = (caddr_t)addr_param;
125 	addr -= HIB_SIZEOF(struct hiballoc_entry);
126 	return (struct hiballoc_entry*)addr;
127 }
128 
129 RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)
130 
131 /*
132  * Allocate memory from the arena.
133  *
134  * Returns NULL if no memory is available.
135  */
136 void *
137 hib_alloc(struct hiballoc_arena *arena, size_t alloc_sz)
138 {
139 	struct hiballoc_entry *entry, *new_entry;
140 	size_t find_sz;
141 
142 	/*
143 	 * Enforce alignment of HIB_ALIGN bytes.
144 	 *
145 	 * Note that, because the entry is put in front of the allocation,
146 	 * 0-byte allocations are guaranteed a unique address.
147 	 */
148 	alloc_sz = roundup(alloc_sz, HIB_ALIGN);
149 
150 	/*
151 	 * Find an entry with hibe_space >= find_sz.
152 	 *
153 	 * If the root node is not large enough, we switch to tree traversal.
154 	 * Because all entries are made at the bottom of the free space,
155 	 * traversal from the end has a slightly better chance of yielding
156 	 * a sufficiently large space.
157 	 */
158 	find_sz = alloc_sz + HIB_SIZEOF(struct hiballoc_entry);
159 	entry = RB_ROOT(&arena->hib_addrs);
160 	if (entry != NULL && entry->hibe_space < find_sz) {
161 		RB_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
162 			if (entry->hibe_space >= find_sz)
163 				break;
164 		}
165 	}
166 
167 	/*
168 	 * Insufficient or too fragmented memory.
169 	 */
170 	if (entry == NULL)
171 		return NULL;
172 
173 	/*
174 	 * Create new entry in allocated space.
175 	 */
176 	new_entry = (struct hiballoc_entry*)(
177 	    (caddr_t)hib_entry_to_addr(entry) + entry->hibe_use);
178 	new_entry->hibe_space = entry->hibe_space - find_sz;
179 	new_entry->hibe_use = alloc_sz;
180 
181 	/*
182 	 * Insert entry.
183 	 */
184 	if (RB_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
185 		panic("hib_alloc: insert failure");
186 	entry->hibe_space = 0;
187 
188 	/* Return address managed by entry. */
189 	return hib_entry_to_addr(new_entry);
190 }
191 
192 /*
193  * Free a pointer previously allocated from this arena.
194  *
195  * If addr is NULL, this will be silently accepted.
196  */
197 void
198 hib_free(struct hiballoc_arena *arena, void *addr)
199 {
200 	struct hiballoc_entry *entry, *prev;
201 
202 	if (addr == NULL)
203 		return;
204 
205 	/*
206 	 * Derive entry from addr and check it is really in this arena.
207 	 */
208 	entry = hib_addr_to_entry(addr);
209 	if (RB_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
210 		panic("hib_free: freed item %p not in hib arena", addr);
211 
212 	/*
213 	 * Give the space in entry to its predecessor.
214 	 *
215 	 * If entry has no predecessor, change its used space into free space
216 	 * instead.
217 	 */
218 	prev = RB_PREV(hiballoc_addr, &arena->hib_addrs, entry);
219 	if (prev != NULL &&
220 	    (void *)((caddr_t)prev + HIB_SIZEOF(struct hiballoc_entry) +
221 	    prev->hibe_use + prev->hibe_space) == entry) {
222 		/* Merge entry. */
223 		RB_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
224 		prev->hibe_space += HIB_SIZEOF(struct hiballoc_entry) +
225 		    entry->hibe_use + entry->hibe_space;
226 	} else {
227 		/* Flip used memory to free space. */
228 		entry->hibe_space += entry->hibe_use;
229 		entry->hibe_use = 0;
230 	}
231 }
232 
233 /*
234  * Initialize hiballoc.
235  *
236  * The allocator will manage memory at ptr, which is len bytes.
237  */
238 int
239 hiballoc_init(struct hiballoc_arena *arena, void *p_ptr, size_t p_len)
240 {
241 	struct hiballoc_entry *entry;
242 	caddr_t ptr;
243 	size_t len;
244 
245 	RB_INIT(&arena->hib_addrs);
246 
247 	/*
248 	 * Hib allocator enforces HIB_ALIGN alignment.
249 	 * Fixup ptr and len.
250 	 */
251 	ptr = (caddr_t)roundup((vaddr_t)p_ptr, HIB_ALIGN);
252 	len = p_len - ((size_t)ptr - (size_t)p_ptr);
253 	len &= ~((size_t)HIB_ALIGN - 1);
254 
255 	/*
256 	 * Insufficient memory to be able to allocate and also do bookkeeping.
257 	 */
258 	if (len <= HIB_SIZEOF(struct hiballoc_entry))
259 		return ENOMEM;
260 
261 	/*
262 	 * Create entry describing space.
263 	 */
264 	entry = (struct hiballoc_entry*)ptr;
265 	entry->hibe_use = 0;
266 	entry->hibe_space = len - HIB_SIZEOF(struct hiballoc_entry);
267 	RB_INSERT(hiballoc_addr, &arena->hib_addrs, entry);
268 
269 	return 0;
270 }
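
/*
 * Illustrative usage sketch (the zlib_area_* names below are placeholders,
 * not kernel symbols): seed the arena with a block of piglet memory and
 * serve allocations from it, as hibernate_zlib_reset() and the zlib
 * allocation callbacks later in this file do.
 */
#if 0
	struct hiballoc_arena arena;
	void *p;

	if (hiballoc_init(&arena, (void *)zlib_area_va, zlib_area_len) == 0) {
		p = hib_alloc(&arena, 128);	/* NULL if arena is exhausted */
		hib_free(&arena, p);		/* freeing NULL is accepted */
	}
#endif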
271 
272 /*
273  * Zero all free memory.
274  */
275 void
276 uvm_pmr_zero_everything(void)
277 {
278 	struct uvm_pmemrange	*pmr;
279 	struct vm_page		*pg;
280 	int			 i;
281 
282 	uvm_lock_fpageq();
283 	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
284 		/* Zero single pages. */
285 		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_DIRTY]))
286 		    != NULL) {
287 			uvm_pmr_remove(pmr, pg);
288 			uvm_pagezero(pg);
289 			atomic_setbits_int(&pg->pg_flags, PG_ZERO);
290 			uvmexp.zeropages++;
291 			uvm_pmr_insert(pmr, pg, 0);
292 		}
293 
294 		/* Zero multi page ranges. */
295 		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_DIRTY]))
296 		    != NULL) {
297 			pg--; /* Size tree always has second page. */
298 			uvm_pmr_remove(pmr, pg);
299 			for (i = 0; i < pg->fpgsz; i++) {
300 				uvm_pagezero(&pg[i]);
301 				atomic_setbits_int(&pg[i].pg_flags, PG_ZERO);
302 				uvmexp.zeropages++;
303 			}
304 			uvm_pmr_insert(pmr, pg, 0);
305 		}
306 	}
307 	uvm_unlock_fpageq();
308 }
309 
310 /*
311  * Mark all memory as dirty.
312  *
313  * Used to inform the system that the clean memory isn't clean for some
314  * reason, for example because we just came back from hibernate.
315  */
316 void
317 uvm_pmr_dirty_everything(void)
318 {
319 	struct uvm_pmemrange	*pmr;
320 	struct vm_page		*pg;
321 	int			 i;
322 
323 	uvm_lock_fpageq();
324 	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
325 		/* Dirty single pages. */
326 		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]))
327 		    != NULL) {
328 			uvm_pmr_remove(pmr, pg);
329 			atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
330 			uvm_pmr_insert(pmr, pg, 0);
331 		}
332 
333 		/* Dirty multi page ranges. */
334 		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
335 		    != NULL) {
336 			pg--; /* Size tree always has second page. */
337 			uvm_pmr_remove(pmr, pg);
338 			for (i = 0; i < pg->fpgsz; i++)
339 				atomic_clearbits_int(&pg[i].pg_flags, PG_ZERO);
340 			uvm_pmr_insert(pmr, pg, 0);
341 		}
342 	}
343 
344 	uvmexp.zeropages = 0;
345 	uvm_unlock_fpageq();
346 }
347 
348 /*
349  * Allocate the highest address that can hold sz.
350  *
351  * sz in bytes.
352  */
353 int
354 uvm_pmr_alloc_pig(paddr_t *addr, psize_t sz)
355 {
356 	struct uvm_pmemrange	*pmr;
357 	struct vm_page		*pig_pg, *pg;
358 
359 	/*
360 	 * Convert sz to pages, since that is what pmemrange uses internally.
361 	 */
362 	sz = atop(round_page(sz));
363 
364 	uvm_lock_fpageq();
365 
366 	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
367 		RB_FOREACH_REVERSE(pig_pg, uvm_pmr_addr, &pmr->addr) {
368 			if (pig_pg->fpgsz >= sz) {
369 				goto found;
370 			}
371 		}
372 	}
373 
374 	/*
375 	 * Allocation failure.
376 	 */
377 	uvm_unlock_fpageq();
378 	return ENOMEM;
379 
380 found:
381 	/* Remove page from freelist. */
382 	uvm_pmr_remove_size(pmr, pig_pg);
383 	pig_pg->fpgsz -= sz;
384 	pg = pig_pg + pig_pg->fpgsz;
385 	if (pig_pg->fpgsz == 0)
386 		uvm_pmr_remove_addr(pmr, pig_pg);
387 	else
388 		uvm_pmr_insert_size(pmr, pig_pg);
389 
390 	uvmexp.free -= sz;
391 	*addr = VM_PAGE_TO_PHYS(pg);
392 
393 	/*
394 	 * Update pg flags.
395 	 *
396 	 * Note that we trash the sz argument now.
397 	 */
398 	while (sz > 0) {
399 		KASSERT(pg->pg_flags & PQ_FREE);
400 
401 		atomic_clearbits_int(&pg->pg_flags,
402 		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);
403 
404 		if (pg->pg_flags & PG_ZERO)
405 			uvmexp.zeropages--;
406 		atomic_clearbits_int(&pg->pg_flags,
407 		    PG_ZERO|PQ_FREE);
408 
409 		pg->uobject = NULL;
410 		pg->uanon = NULL;
411 		pg->pg_version++;
412 
413 		/*
414 		 * Next.
415 		 */
416 		pg++;
417 		sz--;
418 	}
419 
420 	/* Return. */
421 	uvm_unlock_fpageq();
422 	return 0;
423 }
424 
425 /*
426  * Allocate a piglet area.
427  *
428  * The piglet is placed as low in physical memory as possible.
429  * Piglets are aligned to 'align' bytes.
430  *
431  * sz and align in bytes.
432  *
433  * The call may sleep to let the pagedaemon attempt to free memory.
434  * The pagedaemon may decide it's not possible to free enough memory, causing
435  * the allocation to fail.
436  */
437 int
438 uvm_pmr_alloc_piglet(vaddr_t *va, paddr_t *pa, vsize_t sz, paddr_t align)
439 {
440 	paddr_t			 pg_addr, piglet_addr;
441 	struct uvm_pmemrange	*pmr;
442 	struct vm_page		*pig_pg, *pg;
443 	struct pglist		 pageq;
444 	int			 pdaemon_woken;
445 	vaddr_t			 piglet_va;
446 
447 	KASSERT((align & (align - 1)) == 0);
448 	pdaemon_woken = 0; /* Didn't wake the pagedaemon. */
449 
450 	/*
451 	 * Fixup arguments: align must be at least PAGE_SIZE,
452 	 * sz will be converted to pagecount, since that is what
453 	 * pmemrange uses internally.
454 	 */
455 	if (align < PAGE_SIZE)
456 		align = PAGE_SIZE;
457 	sz = round_page(sz);
458 
459 	uvm_lock_fpageq();
460 
461 	TAILQ_FOREACH_REVERSE(pmr, &uvm.pmr_control.use, uvm_pmemrange_use,
462 	    pmr_use) {
463 retry:
464 		/*
465 		 * Search for a range with enough space.
466 		 * Use the address tree, to ensure the range is as low as
467 		 * possible.
468 		 */
469 		RB_FOREACH(pig_pg, uvm_pmr_addr, &pmr->addr) {
470 			pg_addr = VM_PAGE_TO_PHYS(pig_pg);
471 			piglet_addr = (pg_addr + (align - 1)) & ~(align - 1);
472 
473 			if (atop(pg_addr) + pig_pg->fpgsz >=
474 			    atop(piglet_addr) + atop(sz))
475 				goto found;
476 		}
477 	}
478 
479 	/*
480 	 * Try to coerce the pagedaemon into freeing memory
481 	 * for the piglet.
482 	 *
483 	 * pdaemon_woken is set to prevent the code from
484 	 * falling into an endless loop.
485 	 */
486 	if (!pdaemon_woken) {
487 		pdaemon_woken = 1;
488 		if (uvm_wait_pla(ptoa(pmr->low), ptoa(pmr->high) - 1,
489 		    sz, UVM_PLA_FAILOK) == 0)
490 			goto retry;
491 	}
492 
493 	/* Return failure. */
494 	uvm_unlock_fpageq();
495 	return ENOMEM;
496 
497 found:
498 	/*
499 	 * Extract piglet from pigpen.
500 	 */
501 	TAILQ_INIT(&pageq);
502 	uvm_pmr_extract_range(pmr, pig_pg,
503 	    atop(piglet_addr), atop(piglet_addr) + atop(sz), &pageq);
504 
505 	*pa = piglet_addr;
506 	uvmexp.free -= atop(sz);
507 
508 	/*
509 	 * Update pg flags.
510 	 *
511 	 * Note that we trash the sz argument now.
512 	 */
513 	TAILQ_FOREACH(pg, &pageq, pageq) {
514 		KASSERT(pg->pg_flags & PQ_FREE);
515 
516 		atomic_clearbits_int(&pg->pg_flags,
517 		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);
518 
519 		if (pg->pg_flags & PG_ZERO)
520 			uvmexp.zeropages--;
521 		atomic_clearbits_int(&pg->pg_flags,
522 		    PG_ZERO|PQ_FREE);
523 
524 		pg->uobject = NULL;
525 		pg->uanon = NULL;
526 		pg->pg_version++;
527 	}
528 
529 	uvm_unlock_fpageq();
530 
531 	/*
532 	 * Now allocate a va.
533 	 * Use direct mappings for the pages.
534 	 */
535 
536 	piglet_va = *va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, &kd_waitok);
537 	if (!piglet_va) {
538 		uvm_pglistfree(&pageq);
539 		return ENOMEM;
540 	}
541 
542 	/*
543 	 * Map piglet to va.
544 	 */
545 	TAILQ_FOREACH(pg, &pageq, pageq) {
546 		pmap_kenter_pa(piglet_va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
547 		piglet_va += PAGE_SIZE;
548 	}
549 	pmap_update(pmap_kernel());
550 
551 	return 0;
552 }
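
/*
 * In this file the piglet is allocated by get_hibernate_info() with
 * sz = 3 * HIBERNATE_CHUNK_SIZE and align = HIBERNATE_CHUNK_SIZE,
 * matching the piglet layout described at the top of this file.
 */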
553 
554 /*
555  * Free a piglet area.
556  */
557 void
558 uvm_pmr_free_piglet(vaddr_t va, vsize_t sz)
559 {
560 	paddr_t			 pa;
561 	struct vm_page		*pg;
562 
563 	/*
564 	 * Fix parameters.
565 	 */
566 	sz = round_page(sz);
567 
568 	/*
569 	 * Find the first page in piglet.
570 	 * Since piglets are contiguous, the first pg is all we need.
571 	 */
572 	if (!pmap_extract(pmap_kernel(), va, &pa))
573 		panic("uvm_pmr_free_piglet: piglet 0x%lx has no pages", va);
574 	pg = PHYS_TO_VM_PAGE(pa);
575 	if (pg == NULL)
576 		panic("uvm_pmr_free_piglet: unmanaged page 0x%lx", pa);
577 
578 	/*
579 	 * Unmap.
580 	 */
581 	pmap_kremove(va, sz);
582 	pmap_update(pmap_kernel());
583 
584 	/*
585 	 * Free the physical and virtual memory.
586 	 */
587 	uvm_pmr_freepages(pg, atop(sz));
588 	km_free((void *)va, sz, &kv_any, &kp_none);
589 }
590 
591 /*
592  * Physmem RLE compression support.
593  *
594  * Given a physical page address, return the number of pages starting at the
595  * address that are free.  Clamps to the number of pages in
596  * HIBERNATE_CHUNK_SIZE. Returns 0 if the page at addr is not free.
597  */
598 int
599 uvm_page_rle(paddr_t addr)
600 {
601 	struct vm_page		*pg, *pg_end;
602 	struct vm_physseg	*vmp;
603 	int			 pseg_idx, off_idx;
604 
605 	pseg_idx = vm_physseg_find(atop(addr), &off_idx);
606 	if (pseg_idx == -1)
607 		return 0;
608 
609 	vmp = &vm_physmem[pseg_idx];
610 	pg = &vmp->pgs[off_idx];
611 	if (!(pg->pg_flags & PQ_FREE))
612 		return 0;
613 
614 	/*
615 	 * Search for the first non-free page after pg.
616 	 * Note that the page may not be the first page in a free pmemrange,
617 	 * therefore pg->fpgsz cannot be used.
618 	 */
619 	for (pg_end = pg; pg_end <= vmp->lastpg &&
620 	    (pg_end->pg_flags & PQ_FREE) == PQ_FREE; pg_end++)
621 		;
622 	return min((pg_end - pg), HIBERNATE_CHUNK_SIZE/PAGE_SIZE);
623 }
624 
625 /*
626  * Fills out the hibernate_info union pointed to by hiber_info
627  * with information about this machine (swap signature block
628  * offsets, number of memory ranges, kernel in use, etc.).
629  */
630 int
631 get_hibernate_info(union hibernate_info *hiber_info, int suspend)
632 {
633 	int chunktable_size;
634 	struct disklabel dl;
635 	char err_string[128], *dl_ret;
636 
637 	/* Determine I/O function to use */
638 	hiber_info->io_func = get_hibernate_io_function();
639 	if (hiber_info->io_func == NULL)
640 		return (1);
641 
642 	/* Calculate hibernate device */
643 	hiber_info->device = swdevt[0].sw_dev;
644 
645 	/* Read disklabel (used to calculate signature and image offsets) */
646 	dl_ret = disk_readlabel(&dl, hiber_info->device, err_string, 128);
647 
648 	if (dl_ret) {
649 		printf("Hibernate error reading disklabel: %s\n", dl_ret);
650 		return (1);
651 	}
652 
653 	/* Make sure we have a swap partition. */
654 	if (dl.d_partitions[1].p_fstype != FS_SWAP ||
655 	    dl.d_partitions[1].p_size == 0)
656 		return (1);
657 
658 	hiber_info->secsize = dl.d_secsize;
659 
660 	/* Make sure the signature can fit in one block */
661 	KASSERT(sizeof(union hibernate_info) <= hiber_info->secsize);
662 
663 	/* Calculate swap offset from start of disk */
664 	hiber_info->swap_offset = dl.d_partitions[1].p_offset;
665 
666 	/* Calculate signature block location */
667 	hiber_info->sig_offset = dl.d_partitions[1].p_offset +
668 	    dl.d_partitions[1].p_size -
669 	    sizeof(union hibernate_info)/hiber_info->secsize;
670 
671 	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;
672 
673 	/* Stash kernel version information */
674 	bzero(&hiber_info->kernel_version, 128);
675 	bcopy(version, &hiber_info->kernel_version,
676 	    min(strlen(version), sizeof(hiber_info->kernel_version)-1));
677 
678 	if (suspend) {
679 		/* Allocate piglet region */
680 		if (uvm_pmr_alloc_piglet(&hiber_info->piglet_va,
681 		    &hiber_info->piglet_pa, HIBERNATE_CHUNK_SIZE*3,
682 		    HIBERNATE_CHUNK_SIZE)) {
683 			printf("Hibernate failed to allocate the piglet\n");
684 			return (1);
685 		}
686 		hiber_info->io_page = (void *)hiber_info->piglet_va;
687 
688 		/*
689 		 * Initialization of the hibernate IO function for drivers
690 		 * that need to do prep work (such as allocating memory or
691 		 * setting up data structures that cannot safely be done
692 		 * during suspend without causing side effects). There is
693 		 * a matching HIB_DONE call performed after the write is
694 		 * completed.
695 		 */
696 		if (hiber_info->io_func(hiber_info->device, 0,
697 		    (vaddr_t)NULL, 0, HIB_INIT, hiber_info->io_page))
698 			goto fail;
699 
700 	} else {
701 		/*
702 		 * Resuming kernels use a regular I/O page since we won't
703 		 * have access to the suspended kernel's piglet VA at this
704 		 * point. No need to free this I/O page as it will vanish
705 		 * as part of the resume.
706 		 */
707 		hiber_info->io_page = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
708 		if (!hiber_info->io_page)
709 			return (1);
710 	}
711 
712 
713 	if (get_hibernate_info_md(hiber_info))
714 		goto fail;
715 
716 	/* Calculate memory image location in swap */
717 	hiber_info->image_offset = dl.d_partitions[1].p_offset +
718 	    dl.d_partitions[1].p_size -
719 	    (hiber_info->image_size / hiber_info->secsize) -
720 	    sizeof(union hibernate_info)/hiber_info->secsize -
721 	    chunktable_size;
722 
723 	return (0);
724 fail:
725 	if (suspend)
726 		uvm_pmr_free_piglet(hiber_info->piglet_va,
727 		    HIBERNATE_CHUNK_SIZE * 3);
728 
729 	return (1);
730 }
731 
732 /*
733  * Allocate nitems*size bytes from the hiballoc area presently in use
734  */
735 void *
736 hibernate_zlib_alloc(void *unused, int nitems, int size)
737 {
738 	struct hibernate_zlib_state *hibernate_state;
739 
740 	hibernate_state =
741 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
742 
743 	return hib_alloc(&hibernate_state->hiballoc_arena, nitems*size);
744 }
745 
746 /*
747  * Free the memory pointed to by addr in the hiballoc area presently in
748  * use
749  */
750 void
751 hibernate_zlib_free(void *unused, void *addr)
752 {
753 	struct hibernate_zlib_state *hibernate_state;
754 
755 	hibernate_state =
756 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
757 
758 	hib_free(&hibernate_state->hiballoc_arena, addr);
759 }
760 
761 /*
762  * Gets the next RLE value from the image stream
763  */
764 int
765 hibernate_get_next_rle(void)
766 {
767 	int rle, i;
768 	struct hibernate_zlib_state *hibernate_state;
769 
770 	hibernate_state =
771 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
772 
773 	/* Read RLE code */
774 	hibernate_state->hib_stream.next_out = (char *)&rle;
775 	hibernate_state->hib_stream.avail_out = sizeof(rle);
776 
777 	i = inflate(&hibernate_state->hib_stream, Z_FULL_FLUSH);
778 	if (i != Z_OK && i != Z_STREAM_END) {
779 		/*
780 		 * XXX - this will likely reboot/hang most machines
781 		 *       since the console output buffer will be unmapped,
782 		 *       but there's not much else we can do here.
783 		 */
784 		panic("inflate rle error");
785 	}
786 
787 	/* Sanity check what RLE value we got */
788 	if (rle > HIBERNATE_CHUNK_SIZE/PAGE_SIZE || rle < 0)
789 		panic("invalid RLE code");
790 
791 	if (i == Z_STREAM_END)
792 		rle = -1;
793 
794 	return rle;
795 }
796 
797 /*
798  * Inflate next page of data from the image stream
799  */
800 int
801 hibernate_inflate_page(void)
802 {
803 	struct hibernate_zlib_state *hibernate_state;
804 	int i;
805 
806 	hibernate_state =
807 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
808 
809 	/* Set up the stream for inflate */
810 	hibernate_state->hib_stream.next_out = (char *)HIBERNATE_INFLATE_PAGE;
811 	hibernate_state->hib_stream.avail_out = PAGE_SIZE;
812 
813 	/* Process next block of data */
814 	i = inflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH);
815 	if (i != Z_OK && i != Z_STREAM_END) {
816 		/*
817 		 * XXX - this will likely reboot/hang most machines
818 		 *       since the console output buffer will be unmapped,
819 		 *       but there's not much else we can do here.
820 		 */
821 		panic("inflate error");
822 	}
823 
824 	/* We should always have extracted a full page ... */
825 	if (hibernate_state->hib_stream.avail_out != 0) {
826 		/*
827 		 * XXX - this will likely reboot/hang most machines
828 		 *       since the console output buffer will be unmapped,
829 		 *       but there's not much else we can do here.
830 		 */
831 		panic("incomplete page");
832 	}
833 
834 	return (i == Z_STREAM_END);
835 }
836 
837 /*
838  * Inflate size bytes from src into dest, skipping any pages in
839  * [src..dest] that are special (see hibernate_inflate_skip)
840  *
841  * This function executes while using the resume-time stack
842  * and pmap, and therefore cannot use ddb/printf/etc. Doing so
843  * will likely hang or reset the machine since the console output buffer
844  * will be unmapped.
845  */
846 void
847 hibernate_inflate_region(union hibernate_info *hiber_info, paddr_t dest,
848     paddr_t src, size_t size)
849 {
850 	int end_stream = 0;
851 	struct hibernate_zlib_state *hibernate_state;
852 
853 	hibernate_state =
854 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
855 
856 	hibernate_state->hib_stream.next_in = (char *)src;
857 	hibernate_state->hib_stream.avail_in = size;
858 
859 	do {
860 		/* Flush cache and TLB */
861 		hibernate_flush();
862 
863 		/*
864 		 * Is this a special page? If yes, redirect the
865 		 * inflate output to a scratch page (eg, discard it)
866 		 */
867 		if (hibernate_inflate_skip(hiber_info, dest)) {
868 			hibernate_enter_resume_mapping(
869 			    HIBERNATE_INFLATE_PAGE,
870 			    HIBERNATE_INFLATE_PAGE, 0);
871 		} else {
872 			hibernate_enter_resume_mapping(
873 			    HIBERNATE_INFLATE_PAGE, dest, 0);
874 		}
875 
876 		hibernate_flush();
877 		end_stream = hibernate_inflate_page();
878 
879 		dest += PAGE_SIZE;
880 	} while (!end_stream);
881 }
882 
883 /*
884  * deflate from src into the I/O page, up to 'remaining' bytes
885  *
886  * Returns number of input bytes consumed, and may reset
887  * the 'remaining' parameter if not all the output space was consumed
888  * (this information is needed to know how much to write to disk).
889  */
890 size_t
891 hibernate_deflate(union hibernate_info *hiber_info, paddr_t src,
892     size_t *remaining)
893 {
894 	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
895 	struct hibernate_zlib_state *hibernate_state;
896 
897 	hibernate_state =
898 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
899 
900 	/* Set up the stream for deflate */
901 	hibernate_state->hib_stream.next_in = (caddr_t)src;
902 	hibernate_state->hib_stream.avail_in = PAGE_SIZE - (src & PAGE_MASK);
903 	hibernate_state->hib_stream.next_out = (caddr_t)hibernate_io_page +
904 	    (PAGE_SIZE - *remaining);
905 	hibernate_state->hib_stream.avail_out = *remaining;
906 
907 	/* Process next block of data */
908 	if (deflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH) != Z_OK)
909 		panic("hibernate zlib deflate error");
910 
911 	/* Update pointers and return number of bytes consumed */
912 	*remaining = hibernate_state->hib_stream.avail_out;
913 	return (PAGE_SIZE - (src & PAGE_MASK)) -
914 	    hibernate_state->hib_stream.avail_in;
915 }
916 
917 /*
918  * Write the hibernation information specified in hiber_info
919  * to the location in swap previously calculated (last block of
920  * swap), called the "signature block".
921  */
922 int
923 hibernate_write_signature(union hibernate_info *hiber_info)
924 {
925 	/* Write hibernate info to disk */
926 	return (hiber_info->io_func(hiber_info->device, hiber_info->sig_offset,
927 	    (vaddr_t)hiber_info, hiber_info->secsize, HIB_W,
928 	    hiber_info->io_page));
929 }
930 
931 /*
932  * Write the memory chunk table to the area in swap immediately
933  * preceding the signature block. The chunk table is stored
934  * in the piglet when this function is called.
935  */
936 int
937 hibernate_write_chunktable(union hibernate_info *hiber_info)
938 {
939 	struct hibernate_disk_chunk *chunks;
940 	vaddr_t hibernate_chunk_table_start;
941 	size_t hibernate_chunk_table_size;
942 	daddr_t chunkbase;
943 	int i;
944 
945 	hibernate_chunk_table_size = HIBERNATE_CHUNK_TABLE_SIZE;
946 
947 	chunkbase = hiber_info->sig_offset -
948 	    (hibernate_chunk_table_size / hiber_info->secsize);
949 
950 	hibernate_chunk_table_start = hiber_info->piglet_va +
951 	    HIBERNATE_CHUNK_SIZE;
952 
953 	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
954 	    HIBERNATE_CHUNK_SIZE);
955 
956 	/* Write chunk table */
957 	for (i = 0; i < hibernate_chunk_table_size; i += MAXPHYS) {
958 		if (hiber_info->io_func(hiber_info->device,
959 		    chunkbase + (i/hiber_info->secsize),
960 		    (vaddr_t)(hibernate_chunk_table_start + i),
961 		    MAXPHYS, HIB_W, hiber_info->io_page))
962 			return (1);
963 	}
964 
965 	return (0);
966 }
967 
968 /*
969  * Write an empty hiber_info to the swap signature block, which is
970  * guaranteed to not match any valid hiber_info.
971  */
972 int
973 hibernate_clear_signature(void)
974 {
975 	union hibernate_info blank_hiber_info;
976 	union hibernate_info hiber_info;
977 
978 	/* Zero out a blank hiber_info */
979 	bzero(&blank_hiber_info, sizeof(hiber_info));
980 
981 	if (get_hibernate_info(&hiber_info, 0))
982 		return (1);
983 
984 	/* Write (zeroed) hibernate info to disk */
985 	if (hibernate_block_io(&hiber_info,
986 	    hiber_info.sig_offset - hiber_info.swap_offset,
987 	    hiber_info.secsize, (vaddr_t)&blank_hiber_info, 1))
988 		panic("error hibernate write 6");
989 
990 	return (0);
991 }
992 
993 /*
994  * Check chunk range overlap when calculating whether or not to copy a
995  * compressed chunk to the piglet area before decompressing.
996  *
997  * returns zero if the ranges do not overlap, non-zero otherwise.
998  */
999 int
1000 hibernate_check_overlap(paddr_t r1s, paddr_t r1e, paddr_t r2s, paddr_t r2e)
1001 {
1002 	/* case A : end of r1 overlaps start of r2 */
1003 	if (r1s < r2s && r1e > r2s)
1004 		return (1);
1005 
1006 	/* case B : r1 entirely inside r2 */
1007 	if (r1s >= r2s && r1e <= r2e)
1008 		return (1);
1009 
1010 	/* case C : r2 entirely inside r1 */
1011 	if (r2s >= r1s && r2e <= r1e)
1012 		return (1);
1013 
1014 	/* case D : end of r2 overlaps start of r1 */
1015 	if (r2s < r1s && r2e > r1s)
1016 		return (1);
1017 
1018 	return (0);
1019 }
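
/*
 * Equivalently, the two ranges overlap iff (r1s < r2e && r2s < r1e);
 * the cases above just enumerate the ways that can happen.
 */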
1020 
1021 /*
1022  * Compare two hibernate_infos to determine if they are the same (e.g.,
1023  * whether we should be performing a hibernate resume on this machine).
1024  * Not all fields are checked - just enough to verify that the machine
1025  * has the same memory configuration and kernel as the one that
1026  * wrote the signature previously.
1027  */
1028 int
1029 hibernate_compare_signature(union hibernate_info *mine,
1030     union hibernate_info *disk)
1031 {
1032 	u_int i;
1033 
1034 	if (mine->nranges != disk->nranges)
1035 		return (1);
1036 
1037 	if (strcmp(mine->kernel_version, disk->kernel_version) != 0)
1038 		return (1);
1039 
1040 	for (i = 0; i < mine->nranges; i++) {
1041 		if ((mine->ranges[i].base != disk->ranges[i].base) ||
1042 		    (mine->ranges[i].end != disk->ranges[i].end))
1043 			return (1);
1044 	}
1045 
1046 	return (0);
1047 }
1048 
1049 /*
1050  * Transfers xfer_size bytes between the hibernate device specified in
1051  * hib_info at offset blkctr and the vaddr specified at dest.
1052  *
1053  * Separate offsets and pages are used to handle misaligned reads (reads
1054  * that span a page boundary).
1055  *
1056  * blkctr specifies a relative offset (relative to the start of swap),
1057  * not an absolute disk offset.
1058  *
1059  */
1060 int
1061 hibernate_block_io(union hibernate_info *hib_info, daddr_t blkctr,
1062     size_t xfer_size, vaddr_t dest, int iswrite)
1063 {
1064 	struct buf *bp;
1065 	struct bdevsw *bdsw;
1066 	int error;
1067 
1068 	bp = geteblk(xfer_size);
1069 	bdsw = &bdevsw[major(hib_info->device)];
1070 
1071 	error = (*bdsw->d_open)(hib_info->device, FREAD, S_IFCHR, curproc);
1072 	if (error) {
1073 		printf("hibernate_block_io open failed\n");
1074 		return (1);
1075 	}
1076 
1077 	if (iswrite)
1078 		bcopy((caddr_t)dest, bp->b_data, xfer_size);
1079 
1080 	bp->b_bcount = xfer_size;
1081 	bp->b_blkno = blkctr;
1082 	CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
1083 	SET(bp->b_flags, B_BUSY | (iswrite ? B_WRITE : B_READ) | B_RAW);
1084 	bp->b_dev = hib_info->device;
1085 	bp->b_cylinder = 0;
1086 	(*bdsw->d_strategy)(bp);
1087 
1088 	error = biowait(bp);
1089 	if (error) {
1090 		printf("hibernate_block_io biowait failed %d\n", error);
1091 		error = (*bdsw->d_close)(hib_info->device, 0, S_IFCHR,
1092 		    curproc);
1093 		if (error)
1094 			printf("hibernate_block_io error close failed\n");
1095 		return (1);
1096 	}
1097 
1098 	error = (*bdsw->d_close)(hib_info->device, FREAD, S_IFCHR, curproc);
1099 	if (error) {
1100 		printf("hibernate_block_io close failed\n");
1101 		return (1);
1102 	}
1103 
1104 	if (!iswrite)
1105 		bcopy(bp->b_data, (caddr_t)dest, xfer_size);
1106 
1107 	bp->b_flags |= B_INVAL;
1108 	brelse(bp);
1109 
1110 	return (0);
1111 }
1112 
1113 /*
1114  * Reads the signature block from swap, checks against the current machine's
1115  * information. If the information matches, perform a resume by reading the
1116  * saved image into the pig area, and unpacking.
1117  */
1118 void
1119 hibernate_resume(void)
1120 {
1121 	union hibernate_info hiber_info;
1122 	int s;
1123 
1124 	/* Get current running machine's hibernate info */
1125 	bzero(&hiber_info, sizeof(hiber_info));
1126 	if (get_hibernate_info(&hiber_info, 0))
1127 		return;
1128 
1129 	/* Read hibernate info from disk */
1130 	s = splbio();
1131 
1132 	if (hibernate_block_io(&hiber_info,
1133 	    hiber_info.sig_offset - hiber_info.swap_offset,
1134 	    hiber_info.secsize, (vaddr_t)&disk_hiber_info, 0))
1135 		panic("error in hibernate read");
1136 
1137 	/*
1138 	 * If the on-disk and in-memory hibernate signatures match, we
1139 	 * should resume from hibernate; if they do not, bail out here.
1140 	 */
1141 	if (hibernate_compare_signature(&hiber_info, &disk_hiber_info)) {
1142 		splx(s);
1143 		return;
1144 	}
1145 
1146 	printf("Unhibernating...\n");
1147 
1148 	/* Read the image from disk into the image (pig) area */
1149 	if (hibernate_read_image(&disk_hiber_info))
1150 		goto fail;
1151 
1152 	if (config_suspend(TAILQ_FIRST(&alldevs), DVACT_QUIESCE) != 0)
1153 		goto fail;
1154 
1155 	(void) splhigh();
1156 	disable_intr();
1157 	cold = 1;
1158 
1159 	if (config_suspend(TAILQ_FIRST(&alldevs), DVACT_SUSPEND) != 0) {
1160 		cold = 0;
1161 		enable_intr();
1162 		goto fail;
1163 	}
1164 
1165 	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
1166 	    VM_PROT_ALL);
1167 	pmap_activate(curproc);
1168 
1169 	/* Switch stacks */
1170 	hibernate_switch_stack_machdep();
1171 
1172 	/*
1173 	 * Point of no return. Once we pass this point, only kernel code can
1174 	 * be accessed. No global variables or other kernel data structures
1175 	 * are guaranteed to be coherent after unpack starts.
1176 	 *
1177 	 * The image is now in high memory (pig area), we unpack from the pig
1178 	 * to the correct location in memory. We'll eventually end up copying
1179 	 * on top of ourself, but we are assured the kernel code here is the
1180 	 * same between the hibernated and resuming kernel, and we are running
1181 	 * on our own stack, so the overwrite is ok.
1182 	 */
1183 	hibernate_unpack_image(&disk_hiber_info);
1184 
1185 	/*
1186 	 * Resume the loaded kernel by jumping to the MD resume vector.
1187 	 * We won't be returning from this call.
1188 	 */
1189 	hibernate_resume_machdep();
1190 
1191 fail:
1192 	splx(s);
1193 	printf("Unable to resume hibernated image\n");
1194 }
1195 
1196 /*
1197  * Unpack image from pig area to original location by looping through the
1198  * list of output chunks in the order they should be restored (fchunks).
1199  */
1200 void
1201 hibernate_unpack_image(union hibernate_info *hiber_info)
1202 {
1203 	struct hibernate_disk_chunk *chunks;
1204 	union hibernate_info local_hiber_info;
1205 	paddr_t image_cur = global_pig_start;
1206 	short i, *fchunks;
1207 	char *pva = (char *)hiber_info->piglet_va;
1208 	struct hibernate_zlib_state *hibernate_state;
1209 
1210 	hibernate_state =
1211 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
1212 
1213 	/* Mask off based on arch-specific piglet page size */
1214 	pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));
1215 	fchunks = (short *)(pva + (4 * PAGE_SIZE));
1216 
1217 	chunks = (struct hibernate_disk_chunk *)(pva +  HIBERNATE_CHUNK_SIZE);
1218 
1219 	/* Can't use hiber_info that's passed in after this point */
1220 	bcopy(hiber_info, &local_hiber_info, sizeof(union hibernate_info));
1221 
1222 	hibernate_activate_resume_pt_machdep();
1223 
1224 	for (i = 0; i < local_hiber_info.chunk_ctr; i++) {
1225 		/* Reset zlib for inflate */
1226 		if (hibernate_zlib_reset(&local_hiber_info, 0) != Z_OK)
1227 			panic("hibernate failed to reset zlib for inflate");
1228 
1229 		hibernate_process_chunk(&local_hiber_info, &chunks[fchunks[i]],
1230 		    image_cur);
1231 
1232 		image_cur += chunks[fchunks[i]].compressed_size;
1233 
1234 	}
1235 }
1236 
1237 /*
1238  * Bounce a compressed image chunk to the piglet, entering mappings for the
1239  * copied pages as needed
1240  */
1241 void
1242 hibernate_copy_chunk_to_piglet(paddr_t img_cur, vaddr_t piglet, size_t size)
1243 {
1244 	size_t ct, ofs;
1245 	paddr_t src = img_cur;
1246 	vaddr_t dest = piglet;
1247 
1248 	/* Copy first partial page */
1249 	ct = (PAGE_SIZE) - (src & PAGE_MASK);
1250 	ofs = (src & PAGE_MASK);
1251 
1252 	if (ct < PAGE_SIZE) {
1253 		hibernate_enter_resume_mapping(HIBERNATE_INFLATE_PAGE,
1254 			(src - ofs), 0);
1255 		hibernate_flush();
1256 		bcopy((caddr_t)(HIBERNATE_INFLATE_PAGE + ofs), (caddr_t)dest, ct);
1257 		src += ct;
1258 		dest += ct;
1259 	}
1260 
1261 	/* Copy remaining pages */
1262 	while (src < size + img_cur) {
1263 		hibernate_enter_resume_mapping(HIBERNATE_INFLATE_PAGE, src, 0);
1264 		hibernate_flush();
1265 		ct = PAGE_SIZE;
1266 		bcopy((caddr_t)(HIBERNATE_INFLATE_PAGE), (caddr_t)dest, ct);
1267 		hibernate_flush();
1268 		src += ct;
1269 		dest += ct;
1270 	}
1271 }
1272 
1273 /*
1274  * Process a chunk by bouncing it to the piglet, followed by unpacking
1275  */
1276 void
1277 hibernate_process_chunk(union hibernate_info *hiber_info,
1278     struct hibernate_disk_chunk *chunk, paddr_t img_cur)
1279 {
1280 	char *pva = (char *)hiber_info->piglet_va;
1281 
1282 	hibernate_copy_chunk_to_piglet(img_cur,
1283 	 (vaddr_t)(pva + (HIBERNATE_CHUNK_SIZE * 2)), chunk->compressed_size);
1284 
1285 	hibernate_inflate_region(hiber_info, chunk->base,
1286 	    (vaddr_t)(pva + (HIBERNATE_CHUNK_SIZE * 2)),
1287 	    chunk->compressed_size);
1288 }
1289 
1290 /*
1291  * Write a compressed version of this machine's memory to disk, at the
1292  * precalculated swap offset:
1293  *
1294  * end of swap - signature block size - chunk table size - memory size
1295  *
1296  * The function begins by looping through each phys mem range, cutting each
1297  * one into MD sized chunks. These chunks are then compressed individually
1298  * and written out to disk, in phys mem order. Some chunks might compress
1299  * more than others, and for this reason, each chunk's size is recorded
1300  * in the chunk table, which is written to disk after the image has
1301  * properly been compressed and written (in hibernate_write_chunktable).
1302  *
1303  * When this function is called, the machine is nearly suspended - most
1304  * devices are quiesced/suspended, interrupts are off, and cold has
1305  * been set. This means that there can be no side effects once the
1306  * write has started, and the write function itself can also have no
1307  * side effects. This also means no printfs are permitted (since printf
1308  * has side effects).
1309  */
1310 int
1311 hibernate_write_chunks(union hibernate_info *hiber_info)
1312 {
1313 	paddr_t range_base, range_end, inaddr, temp_inaddr;
1314 	size_t nblocks, out_remaining, used;
1315 	struct hibernate_disk_chunk *chunks;
1316 	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
1317 	daddr_t blkctr = hiber_info->image_offset, offset = 0;
1318 	int i;
1319 	struct hibernate_zlib_state *hibernate_state;
1320 
1321 	hibernate_state =
1322 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
1323 
1324 	hiber_info->chunk_ctr = 0;
1325 
1326 	/*
1327 	 * Allocate VA for the temp and copy page.
1328 	 *
1329 	 * These will become part of the suspended kernel and will
1330 	 * be freed in hibernate_free, upon resume.
1331 	 */
1332 	hibernate_temp_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
1333 	    &kp_none, &kd_nowait);
1334 	if (!hibernate_temp_page)
1335 		return (1);
1336 
1337 	hibernate_copy_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
1338 	    &kp_none, &kd_nowait);
1339 	if (!hibernate_copy_page)
1340 		return (1);
1341 
1342 	pmap_kenter_pa(hibernate_copy_page,
1343 	    (hiber_info->piglet_pa + 3*PAGE_SIZE), VM_PROT_ALL);
1344 
1345 	/* XXX - not needed on all archs */
1346 	pmap_activate(curproc);
1347 
1348 	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
1349 	    HIBERNATE_CHUNK_SIZE);
1350 
1351 	/* Calculate the chunk regions */
1352 	for (i = 0; i < hiber_info->nranges; i++) {
1353 		range_base = hiber_info->ranges[i].base;
1354 		range_end = hiber_info->ranges[i].end;
1355 
1356 		inaddr = range_base;
1357 
1358 		while (inaddr < range_end) {
1359 			chunks[hiber_info->chunk_ctr].base = inaddr;
1360 			if (inaddr + HIBERNATE_CHUNK_SIZE < range_end)
1361 				chunks[hiber_info->chunk_ctr].end = inaddr +
1362 				    HIBERNATE_CHUNK_SIZE;
1363 			else
1364 				chunks[hiber_info->chunk_ctr].end = range_end;
1365 
1366 			inaddr += HIBERNATE_CHUNK_SIZE;
1367 			hiber_info->chunk_ctr++;
1368 		}
1369 	}
1370 
1371 	/* Compress and write the chunks in the chunktable */
1372 	for (i = 0; i < hiber_info->chunk_ctr; i++) {
1373 		range_base = chunks[i].base;
1374 		range_end = chunks[i].end;
1375 
1376 		chunks[i].offset = blkctr;
1377 
1378 		/* Reset zlib for deflate */
1379 		if (hibernate_zlib_reset(hiber_info, 1) != Z_OK)
1380 			return (1);
1381 
1382 		inaddr = range_base;
1383 
1384 		/*
1385 		 * For each range, loop through its phys mem region
1386 		 * and write out the chunks (the last chunk might be
1387 		 * smaller than the chunk size).
1388 		 */
1389 		while (inaddr < range_end) {
1390 			out_remaining = PAGE_SIZE;
1391 			while (out_remaining > 0 && inaddr < range_end) {
1392 
1393 				/*
1394 				 * Adjust for regions that are not evenly
1395 				 * divisible by PAGE_SIZE or overflowed
1396 				 * pages from the previous iteration.
1397 				 */
1398 				temp_inaddr = (inaddr & PAGE_MASK) +
1399 				    hibernate_copy_page;
1400 
1401 				/* Deflate from temp_inaddr to IO page */
1402 				if (inaddr != range_end) {
1403 					pmap_kenter_pa(hibernate_temp_page,
1404 					    inaddr & PMAP_PA_MASK, VM_PROT_ALL);
1405 
1406 					/* XXX - not needed on all archs */
1407 					pmap_activate(curproc);
1408 
1409 					bcopy((caddr_t)hibernate_temp_page,
1410 					    (caddr_t)hibernate_copy_page,
1411 					    PAGE_SIZE);
1412 					inaddr += hibernate_deflate(hiber_info,
1413 					    temp_inaddr, &out_remaining);
1414 				}
1415 
1416 				if (out_remaining == 0) {
1417 					/* Filled up the page */
1418 					nblocks =
1419 					    PAGE_SIZE / hiber_info->secsize;
1420 
1421 					if (hiber_info->io_func(
1422 					    hiber_info->device,
1423 					    blkctr, (vaddr_t)hibernate_io_page,
1424 					    PAGE_SIZE, HIB_W,
1425 					    hiber_info->io_page))
1426 						return (1);
1427 
1428 					blkctr += nblocks;
1429 				}
1430 			}
1431 		}
1432 
1433 		if (inaddr != range_end)
1434 			return (1);
1435 
1436 		/*
1437 		 * End of range. Round up to next secsize bytes
1438 		 * after finishing compress
1439 		 */
1440 		if (out_remaining == 0)
1441 			out_remaining = PAGE_SIZE;
1442 
1443 		/* Finish compress */
1444 		hibernate_state->hib_stream.next_in = (caddr_t)inaddr;
1445 		hibernate_state->hib_stream.avail_in = 0;
1446 		hibernate_state->hib_stream.next_out =
1447 		    (caddr_t)hibernate_io_page + (PAGE_SIZE - out_remaining);
1448 		hibernate_state->hib_stream.avail_out = out_remaining;
1449 
1450 		if (deflate(&hibernate_state->hib_stream, Z_FINISH) !=
1451 		    Z_STREAM_END)
1452 			return (1);
1453 
1454 		out_remaining = hibernate_state->hib_stream.avail_out;
1455 
1456 		used = PAGE_SIZE - out_remaining;
1457 		nblocks = used / hiber_info->secsize;
1458 
1459 		/* Round up to next block if needed */
1460 		if (used % hiber_info->secsize != 0)
1461 			nblocks++;
1462 
1463 		/* Write final block(s) for this chunk */
1464 		if (hiber_info->io_func(hiber_info->device, blkctr,
1465 		    (vaddr_t)hibernate_io_page, nblocks*hiber_info->secsize,
1466 		    HIB_W, hiber_info->io_page))
1467 			return (1);
1468 
1469 		blkctr += nblocks;
1470 
1471 		offset = blkctr;
1472 		chunks[i].compressed_size = (offset - chunks[i].offset) *
1473 		    hiber_info->secsize;
1474 	}
1475 
1476 	return (0);
1477 }
1478 
1479 /*
1480  * Reset the zlib stream state and allocate a new hiballoc area for either
1481  * inflate or deflate. This function is called once for each hibernate chunk.
1482  * Calling hiballoc_init multiple times is acceptable since the memory it is
1483  * provided is unmanaged memory (stolen). We use the memory provided to us
1484  * by the piglet allocated via the supplied hiber_info.
1485  */
1486 int
1487 hibernate_zlib_reset(union hibernate_info *hiber_info, int deflate)
1488 {
1489 	vaddr_t hibernate_zlib_start;
1490 	size_t hibernate_zlib_size;
1491 	char *pva = (char *)hiber_info->piglet_va;
1492 	struct hibernate_zlib_state *hibernate_state;
1493 
1494 	hibernate_state =
1495 	    (struct hibernate_zlib_state *)HIBERNATE_HIBALLOC_PAGE;
1496 
1497 	if (!deflate)
1498 		pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));
1499 
1500 	hibernate_zlib_start = (vaddr_t)(pva + (28 * PAGE_SIZE));
1501 	hibernate_zlib_size = 80 * PAGE_SIZE;
1502 
1503 	bzero((caddr_t)hibernate_zlib_start, hibernate_zlib_size);
1504 	bzero((caddr_t)hibernate_state, PAGE_SIZE);
1505 
1506 	/* Set up stream structure */
1507 	hibernate_state->hib_stream.zalloc = (alloc_func)hibernate_zlib_alloc;
1508 	hibernate_state->hib_stream.zfree = (free_func)hibernate_zlib_free;
1509 
1510 	/* Initialize the hiballoc arena for zlib allocs/frees */
1511 	hiballoc_init(&hibernate_state->hiballoc_arena,
1512 	    (caddr_t)hibernate_zlib_start, hibernate_zlib_size);
1513 
1514 	if (deflate) {
1515 		return deflateInit(&hibernate_state->hib_stream,
1516 		    Z_BEST_SPEED);
1517 	} else
1518 		return inflateInit(&hibernate_state->hib_stream);
1519 }
1520 
1521 /*
1522  * Reads the hibernated memory image from disk, whose location and
1523  * size are recorded in hiber_info. Begin by reading the persisted
1524  * chunk table, which records the original chunk placement location
1525  * and compressed size for each. Next, allocate a pig region of
1526  * sufficient size to hold the compressed image. Next, read the
1527  * chunks into the pig area (calling hibernate_read_chunks to do this),
1528  * and finally, if all of the above succeeds, clear the hibernate signature.
1529  * The function will then return to hibernate_resume, which will proceed
1530  * to unpack the pig image to the correct place in memory.
1531  */
1532 int
1533 hibernate_read_image(union hibernate_info *hiber_info)
1534 {
1535 	size_t compressed_size, disk_size, chunktable_size, pig_sz;
1536 	paddr_t image_start, image_end, pig_start, pig_end;
1537 	struct hibernate_disk_chunk *chunks;
1538 	daddr_t blkctr;
1539 	vaddr_t chunktable = (vaddr_t)NULL;
1540 	paddr_t piglet_chunktable = hiber_info->piglet_pa +
1541 	    HIBERNATE_CHUNK_SIZE;
1542 	int i;
1543 
1544 	pmap_activate(curproc);
1545 
1546 	/* Calculate total chunk table size in disk blocks */
1547 	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;
1548 
1549 	blkctr = hiber_info->sig_offset - chunktable_size -
1550 			hiber_info->swap_offset;
1551 
1552 	chunktable = (vaddr_t)km_alloc(HIBERNATE_CHUNK_TABLE_SIZE, &kv_any,
1553 	    &kp_none, &kd_nowait);
1554 
1555 	if (!chunktable)
1556 		return (1);
1557 
1558 	/* Read the chunktable from disk into the piglet chunktable */
1559 	for (i = 0; i < HIBERNATE_CHUNK_TABLE_SIZE;
1560 	    i += PAGE_SIZE, blkctr += PAGE_SIZE/hiber_info->secsize) {
1561 		pmap_kenter_pa(chunktable + i, piglet_chunktable + i,
1562 		    VM_PROT_ALL);
1563 		pmap_update(pmap_kernel());
1564 		hibernate_block_io(hiber_info, blkctr, PAGE_SIZE,
1565 		    chunktable + i, 0);
1566 	}
1567 
1568 	blkctr = hiber_info->image_offset;
1569 	compressed_size = 0;
1570 
1571 	chunks = (struct hibernate_disk_chunk *)chunktable;
1572 
1573 	for (i = 0; i < hiber_info->chunk_ctr; i++)
1574 		compressed_size += chunks[i].compressed_size;
1575 
1576 	disk_size = compressed_size;
1577 
1578 	/* Allocate the pig area */
1579 	pig_sz = compressed_size + HIBERNATE_CHUNK_SIZE;
1580 	if (uvm_pmr_alloc_pig(&pig_start, pig_sz) == ENOMEM)
1581 		return (1);
1582 
1583 	pig_end = pig_start + pig_sz;
1584 
1585 	/* Calculate image extents. Pig image must end on a chunk boundary. */
1586 	image_end = pig_end & ~(HIBERNATE_CHUNK_SIZE - 1);
1587 	image_start = pig_start;
1588 
1589 	image_start = image_end - disk_size;
1590 
1591 	hibernate_read_chunks(hiber_info, image_start, image_end, disk_size,
1592 	    chunks);
1593 
1594 	pmap_kremove(chunktable, PAGE_SIZE);
1595 	pmap_update(pmap_kernel());
1596 
1597 	/* Prepare the resume time pmap/page table */
1598 	hibernate_populate_resume_pt(hiber_info, image_start, image_end);
1599 
1600 	/* Read complete, clear the signature and return */
1601 	return hibernate_clear_signature();
1602 }
1603 
1604 /*
1605  * Read the hibernated memory chunks from disk (chunk information at this
1606  * point is stored in the piglet) into the pig area specified by
1607  * [pig_start .. pig_end]. Order the chunks so that the final chunk is the
1608  * only chunk with overlap possibilities.
1609  */
1610 int
1611 hibernate_read_chunks(union hibernate_info *hib_info, paddr_t pig_start,
1612     paddr_t pig_end, size_t image_compr_size,
1613     struct hibernate_disk_chunk *chunks)
1614 {
1615 	paddr_t img_index, img_cur, r1s, r1e, r2s, r2e;
1616 	paddr_t copy_start, copy_end, piglet_cur;
1617 	paddr_t piglet_base = hib_info->piglet_pa;
1618 	paddr_t piglet_end = piglet_base + HIBERNATE_CHUNK_SIZE;
1619 	daddr_t blkctr;
1620 	size_t processed, compressed_size, read_size;
1621 	int overlap, found, nchunks, nochunks = 0, nfchunks = 0, npchunks = 0;
1622 	short *ochunks, *pchunks, *fchunks, i, j;
1623 	vaddr_t tempva = (vaddr_t)NULL, hibernate_fchunk_area = (vaddr_t)NULL;
1624 
1625 	global_pig_start = pig_start;
1626 
1627 	/* XXX - don't need this on all archs */
1628 	pmap_activate(curproc);
1629 
1630 	/*
1631 	 * These mappings go into the resuming kernel's page table, and are
1632 	 * used only during image read. They disappear when the suspended
1633 	 * kernel is unpacked on top of us.
1634 	 */
1635 	tempva = (vaddr_t)km_alloc(2*PAGE_SIZE, &kv_any, &kp_none, &kd_nowait);
1636 	if (!tempva)
1637 		return (1);
1638 	hibernate_fchunk_area = (vaddr_t)km_alloc(24*PAGE_SIZE, &kv_any,
1639 	    &kp_none, &kd_nowait);
1640 	if (!hibernate_fchunk_area)
1641 		return (1);
1642 
1643 	/* Final output chunk ordering VA */
1644 	fchunks = (short *)hibernate_fchunk_area;
1645 
1646 	/* Piglet chunk ordering VA */
1647 	pchunks = (short *)(hibernate_fchunk_area + (8*PAGE_SIZE));
1648 
1649 	/* Output chunk ordering VA */
1650 	ochunks = (short *)(hibernate_fchunk_area + (16*PAGE_SIZE));
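
	/*
	 * Roles of the three lists built below: ochunks holds the candidate
	 * output order (out-of-pig chunks first, then ascending base
	 * address), pchunks collects chunks that must be bounced through
	 * the piglet because they conflict with their pig load area, and
	 * fchunks is the final order in which chunks are read and later
	 * unpacked.
	 */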
1651 
1652 	/* Map the chunk ordering region */
1653 	for (i = 0; i < 24; i++) {
1654 		pmap_kenter_pa(hibernate_fchunk_area + (i*PAGE_SIZE),
1655 			piglet_base + ((4+i)*PAGE_SIZE), VM_PROT_ALL);
1656 		pmap_update(pmap_kernel());
1657 	}
1658 
1659 	nchunks = hib_info->chunk_ctr;
1660 
1661 	/* Initially start all chunks as unplaced */
1662 	for (i = 0; i < nchunks; i++)
1663 		chunks[i].flags = 0;
1664 
1665 	/*
1666 	 * Search the list for chunks that are outside the pig area. These
1667 	 * can be placed first in the final output list.
1668 	 */
1669 	for (i = 0; i < nchunks; i++) {
1670 		if (chunks[i].end <= pig_start || chunks[i].base >= pig_end) {
1671 			ochunks[nochunks] = i;
1672 			fchunks[nfchunks] = i;
1673 			nochunks++;
1674 			nfchunks++;
1675 			chunks[i].flags |= HIBERNATE_CHUNK_USED;
1676 		}
1677 	}
1678 
1679 	/*
1680 	 * Walk the ordering, place the chunks in ascending memory order.
1681 	 * Conflicts might arise, these are handled next.
1682 	 */
1683 	do {
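		/*
		 * img_index is a paddr_t, so the -1 below wraps to the
		 * largest possible value; each pass then selects the
		 * unplaced chunk with the lowest base address.
		 */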
1684 		img_index = -1;
1685 		found = 0;
1686 		j = -1;
1687 		for (i = 0; i < nchunks; i++)
1688 			if (chunks[i].base < img_index &&
1689 			    chunks[i].flags == 0) {
1690 				j = i;
1691 				img_index = chunks[i].base;
1692 			}
1693 
1694 		if (j != -1) {
1695 			found = 1;
1696 			ochunks[nochunks] = j;
1697 			nochunks++;
1698 			chunks[j].flags |= HIBERNATE_CHUNK_PLACED;
1699 		}
1700 	} while (found);
1701 
1702 	img_index = pig_start;
1703 
1704 	/*
1705 	 * Identify chunk output conflicts (chunks whose pig load area
1706 	 * corresponds to their original memory placement location)
1707 	 */
1708 	for (i = 0; i < nochunks; i++) {
1709 		overlap = 0;
1710 		r1s = img_index;
1711 		r1e = img_index + chunks[ochunks[i]].compressed_size;
1712 		r2s = chunks[ochunks[i]].base;
1713 		r2e = chunks[ochunks[i]].end;
1714 
1715 		overlap = hibernate_check_overlap(r1s, r1e, r2s, r2e);
1716 		if (overlap)
1717 			chunks[ochunks[i]].flags |= HIBERNATE_CHUNK_CONFLICT;
1718 		img_index += chunks[ochunks[i]].compressed_size;
1719 	}
1720 
1721 	/*
1722 	 * Prepare the final output chunk list. Calculate an output
1723 	 * inflate strategy for overlapping chunks if needed.
1724 	 */
1725 	img_index = pig_start;
1726 	for (i = 0; i < nochunks; i++) {
1727 		/*
1728 		 * If a conflict is detected, consume enough compressed
1729 		 * output chunks to fill the piglet
1730 		 */
1731 		if (chunks[ochunks[i]].flags & HIBERNATE_CHUNK_CONFLICT) {
1732 			copy_start = piglet_base;
1733 			copy_end = piglet_end;
1734 			piglet_cur = piglet_base;
1735 			npchunks = 0;
1736 			j = i;
1737 
1738 			while (copy_start < copy_end && j < nochunks) {
1739 				piglet_cur +=
1740 				    chunks[ochunks[j]].compressed_size;
1741 				pchunks[npchunks] = ochunks[j];
1742 				npchunks++;
1743 				copy_start +=
1744 				    chunks[ochunks[j]].compressed_size;
1745 				img_index += chunks[ochunks[j]].compressed_size;
1746 				i++;
1747 				j++;
1748 			}
1749 
1750 			piglet_cur = piglet_base;
1751 			for (j = 0; j < npchunks; j++) {
1752 				piglet_cur +=
1753 				    chunks[pchunks[j]].compressed_size;
1754 				fchunks[nfchunks] = pchunks[j];
1755 				chunks[pchunks[j]].flags |=
1756 				    HIBERNATE_CHUNK_USED;
1757 				nfchunks++;
1758 			}
1759 		} else {
1760 			/*
1761 			 * No conflict, chunk can be added without copying
1762 			 */
1763 			if ((chunks[ochunks[i]].flags &
1764 			    HIBERNATE_CHUNK_USED) == 0) {
1765 				fchunks[nfchunks] = ochunks[i];
1766 				chunks[ochunks[i]].flags |=
1767 				    HIBERNATE_CHUNK_USED;
1768 				nfchunks++;
1769 			}
1770 			img_index += chunks[ochunks[i]].compressed_size;
1771 		}
1772 	}
1773 
1774 	img_index = pig_start;
1775 	for (i = 0; i < nfchunks; i++) {
1776 		piglet_cur = piglet_base;
1777 		img_index += chunks[fchunks[i]].compressed_size;
1778 	}
1779 
1780 	img_cur = pig_start;
1781 
1782 	for (i = 0; i < nfchunks; i++) {
1783 		blkctr = chunks[fchunks[i]].offset - hib_info->swap_offset;
1784 		processed = 0;
1785 		compressed_size = chunks[fchunks[i]].compressed_size;
1786 
1787 		while (processed < compressed_size) {
1788 			pmap_kenter_pa(tempva, img_cur, VM_PROT_ALL);
1789 			pmap_kenter_pa(tempva + PAGE_SIZE, img_cur+PAGE_SIZE,
1790 			    VM_PROT_ALL);
1791 			pmap_update(pmap_kernel());
1792 
1793 			if (compressed_size - processed >= PAGE_SIZE)
1794 				read_size = PAGE_SIZE;
1795 			else
1796 				read_size = compressed_size - processed;
1797 
1798 			hibernate_block_io(hib_info, blkctr, read_size,
1799 			    tempva + (img_cur & PAGE_MASK), 0);
1800 
1801 			blkctr += (read_size / hib_info->secsize);
1802 
1803 			hibernate_flush();
1804 			pmap_kremove(tempva, PAGE_SIZE);
1805 			pmap_kremove(tempva + PAGE_SIZE, PAGE_SIZE);
1806 			processed += read_size;
1807 			img_cur += read_size;
1808 		}
1809 	}
1810 
1811 	pmap_kremove(hibernate_fchunk_area, PAGE_SIZE);
1812 	pmap_kremove((vaddr_t)pchunks, PAGE_SIZE);
1813 	pmap_kremove((vaddr_t)fchunks, PAGE_SIZE);
1814 	pmap_update(pmap_kernel());
1815 
1816 	return (0);
1817 }
1818 
1819 /*
1820  * Hibernating a machine comprises the following operations:
1821  *  1. Calculating this machine's hibernate_info information
1822  *  2. Allocating a piglet and saving the piglet's physaddr
1823  *  3. Calculating the memory chunks
1824  *  4. Writing the compressed chunks to disk
1825  *  5. Writing the chunk table
1826  *  6. Writing the signature block (hibernate_info)
1827  *
1828  * On most architectures, the function calling hibernate_suspend would
1829  * then power off the machine using some MD-specific implementation.
1830  */
1831 int
1832 hibernate_suspend(void)
1833 {
1834 	union hibernate_info hib_info;
1835 	size_t swap_size;
1836 
1837 	/*
1838 	 * Calculate memory ranges, swap offsets, etc.
1839 	 * This also allocates a piglet whose physaddr is stored in
1840 	 * hib_info->piglet_pa and vaddr stored in hib_info->piglet_va
1841 	 */
1842 	if (get_hibernate_info(&hib_info, 1))
1843 		return (1);
1844 
1845 	swap_size = hib_info.image_size + hib_info.secsize +
1846 		HIBERNATE_CHUNK_TABLE_SIZE;
1847 
1848 	if (uvm_swap_check_range(hib_info.device, swap_size)) {
1849 		printf("insufficient swap space for hibernate\n");
1850 		return (1);
1851 	}
1852 
1853 	pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
1854 		VM_PROT_ALL);
1855 	pmap_activate(curproc);
1856 
1857 	/* Stash the piglet VA so we can free it in the resuming kernel */
1858 	global_piglet_va = hib_info.piglet_va;
1859 
1860 	if (hibernate_write_chunks(&hib_info))
1861 		return (1);
1862 
1863 	if (hibernate_write_chunktable(&hib_info))
1864 		return (1);
1865 
1866 	if (hibernate_write_signature(&hib_info))
1867 		return (1);
1868 
1869 	/* Allow the disk to settle */
1870 	delay(500000);
1871 
1872 	/*
1873 	 * Give the device-specific I/O function a notification that we're
1874 	 * done, and that it can clean up or shutdown as needed.
1875 	 */
1876 	hib_info.io_func(hib_info.device, 0, (vaddr_t)NULL, 0,
1877 	    HIB_DONE, hib_info.io_page);
1878 
1879 	return (0);
1880 }
1881 
1882 /*
1883  * Free items allocated by hibernate_suspend()
1884  */
1885 void
1886 hibernate_free(void)
1887 {
1888 	if (global_piglet_va)
1889 		uvm_pmr_free_piglet(global_piglet_va,
1890 		    3*HIBERNATE_CHUNK_SIZE);
1891 
1892 	if (hibernate_copy_page)
1893 		pmap_kremove(hibernate_copy_page, PAGE_SIZE);
1894 	if (hibernate_temp_page)
1895 		pmap_kremove(hibernate_temp_page, PAGE_SIZE);
1896 
1897 	pmap_update(pmap_kernel());
1898 
1899 	if (hibernate_copy_page)
1900 		km_free((void *)hibernate_copy_page, PAGE_SIZE,
1901 		    &kv_any, &kp_none);
1902 	if (hibernate_temp_page)
1903 		km_free((void *)hibernate_temp_page, PAGE_SIZE,
1904 		    &kv_any, &kp_none);
1905 
1906 	global_piglet_va = 0;
1907 	hibernate_copy_page = 0;
1908 	hibernate_temp_page = 0;
1909 }
1910