/*	$OpenBSD: subr_hibernate.c,v 1.22 2011/11/14 00:25:17 mlarkin Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
 * Copyright (c) 2011 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/hibernate.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/tree.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <uvm/uvm.h>
#include <machine/hibernate.h>

struct hibernate_zlib_state *hibernate_state;

/* Temporary vaddr ranges used during hibernate */
vaddr_t hibernate_temp_page;
vaddr_t hibernate_copy_page;
vaddr_t hibernate_stack_page;
vaddr_t hibernate_fchunk_area;
vaddr_t hibernate_chunktable_area;

/* Hibernate info as read from disk during resume */
union hibernate_info disk_hiber_info;
paddr_t global_pig_start;
vaddr_t global_piglet_va;

/*
 * Hib alloc enforced alignment.
 */
#define HIB_ALIGN		8 /* bytes alignment */

/*
 * sizeof builtin operation, but with alignment constraint.
 */
#define HIB_SIZEOF(_type)	roundup(sizeof(_type), HIB_ALIGN)

struct hiballoc_entry {
	size_t			hibe_use;
	size_t			hibe_space;
	RB_ENTRY(hiballoc_entry) hibe_entry;
};

/*
 * Compare hiballoc entries based on the address they manage.
 *
 * Since the address is fixed, relative to struct hiballoc_entry,
 * we just compare the hiballoc_entry pointers.
 */
static __inline int
hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r)
{
	return l < r ? -1 : (l > r);
}

RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)

/*
 * Given a hiballoc entry, return the address it manages.
 */
static __inline void *
hib_entry_to_addr(struct hiballoc_entry *entry)
{
	caddr_t addr;

	addr = (caddr_t)entry;
	addr += HIB_SIZEOF(struct hiballoc_entry);
	return addr;
}

/*
 * Given an address, find the hiballoc that corresponds.
 */
static __inline struct hiballoc_entry*
hib_addr_to_entry(void *addr_param)
{
	caddr_t addr;

	addr = (caddr_t)addr_param;
	addr -= HIB_SIZEOF(struct hiballoc_entry);
	return (struct hiballoc_entry*)addr;
}
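
/*
 * Layout note (editorial sketch): every allocation is immediately
 * preceded by its bookkeeping header, so the two helpers above are
 * simple inverses of each other:
 *
 *	|<- HIB_SIZEOF(struct hiballoc_entry) ->|<- hibe_use ->|<- hibe_space ->|
 *	^ struct hiballoc_entry                 ^ address handed to the caller
 */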

RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)

/*
 * Allocate memory from the arena.
 *
 * Returns NULL if no memory is available.
 */
void *
hib_alloc(struct hiballoc_arena *arena, size_t alloc_sz)
{
	struct hiballoc_entry *entry, *new_entry;
	size_t find_sz;

	/*
	 * Enforce alignment of HIB_ALIGN bytes.
	 *
	 * Note that, because the entry is put in front of the allocation,
	 * 0-byte allocations are guaranteed a unique address.
	 */
	alloc_sz = roundup(alloc_sz, HIB_ALIGN);

	/*
	 * Find an entry with hibe_space >= find_sz.
	 *
	 * If the root node is not large enough, we switch to tree traversal.
	 * Because all entries are made at the bottom of the free space,
	 * traversal from the end has a slightly better chance of yielding
	 * a sufficiently large space.
	 */
	find_sz = alloc_sz + HIB_SIZEOF(struct hiballoc_entry);
	entry = RB_ROOT(&arena->hib_addrs);
	if (entry != NULL && entry->hibe_space < find_sz) {
		RB_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
			if (entry->hibe_space >= find_sz)
				break;
		}
	}

	/*
	 * Insufficient or too fragmented memory.
	 */
	if (entry == NULL)
		return NULL;

	/*
	 * Create new entry in allocated space.
	 */
	new_entry = (struct hiballoc_entry*)(
	    (caddr_t)hib_entry_to_addr(entry) + entry->hibe_use);
	new_entry->hibe_space = entry->hibe_space - find_sz;
	new_entry->hibe_use = alloc_sz;

	/*
	 * Insert entry.
	 */
	if (RB_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
		panic("hib_alloc: insert failure");
	entry->hibe_space = 0;

	/* Return address managed by entry. */
	return hib_entry_to_addr(new_entry);
}

/*
 * Free a pointer previously allocated from this arena.
 *
 * If addr is NULL, this will be silently accepted.
 */
void
hib_free(struct hiballoc_arena *arena, void *addr)
{
	struct hiballoc_entry *entry, *prev;

	if (addr == NULL)
		return;

	/*
	 * Derive entry from addr and check it is really in this arena.
	 */
	entry = hib_addr_to_entry(addr);
	if (RB_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
		panic("hib_free: freed item %p not in hib arena", addr);

	/*
	 * Give the space in entry to its predecessor.
	 *
	 * If entry has no predecessor, change its used space into free space
	 * instead.
	 */
	prev = RB_PREV(hiballoc_addr, &arena->hib_addrs, entry);
	if (prev != NULL &&
	    (void *)((caddr_t)prev + HIB_SIZEOF(struct hiballoc_entry) +
	    prev->hibe_use + prev->hibe_space) == entry) {
		/* Merge entry. */
		RB_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
		prev->hibe_space += HIB_SIZEOF(struct hiballoc_entry) +
		    entry->hibe_use + entry->hibe_space;
	} else {
		/* Flip used memory to free space. */
		entry->hibe_space += entry->hibe_use;
		entry->hibe_use = 0;
	}
}

/*
 * Initialize hiballoc.
 *
 * The allocator will manage memory at ptr, which is len bytes.
 */
int
hiballoc_init(struct hiballoc_arena *arena, void *p_ptr, size_t p_len)
{
	struct hiballoc_entry *entry;
	caddr_t ptr;
	size_t len;

	RB_INIT(&arena->hib_addrs);

	/*
	 * Hib allocator enforces HIB_ALIGN alignment.
	 * Fixup ptr and len.
	 */
	ptr = (caddr_t)roundup((vaddr_t)p_ptr, HIB_ALIGN);
	len = p_len - ((size_t)ptr - (size_t)p_ptr);
	len &= ~((size_t)HIB_ALIGN - 1);

	/*
	 * Insufficient memory to be able to allocate and also do bookkeeping.
	 */
	if (len <= HIB_SIZEOF(struct hiballoc_entry))
		return ENOMEM;

	/*
	 * Create entry describing space.
	 */
	entry = (struct hiballoc_entry*)ptr;
	entry->hibe_use = 0;
	entry->hibe_space = len - HIB_SIZEOF(struct hiballoc_entry);
	RB_INSERT(hiballoc_addr, &arena->hib_addrs, entry);

	return 0;
}
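
/*
 * Usage sketch (editorial, not compiled): the arena is handed a raw,
 * unmanaged memory region and then serves malloc-style requests from it.
 * In this file the region comes from the piglet (see hibernate_zlib_reset
 * below); the static buffer here is hypothetical.
 *
 *	static char pool[16 * 1024];
 *	struct hiballoc_arena arena;
 *	void *p;
 *
 *	if (hiballoc_init(&arena, pool, sizeof(pool)) != 0)
 *		return;			// region too small
 *	p = hib_alloc(&arena, 128);	// NULL when out of space
 *	hib_free(&arena, p);		// NULL is silently accepted
 */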

/*
 * Zero all free memory.
 */
void
uvm_pmr_zero_everything(void)
{
	struct uvm_pmemrange	*pmr;
	struct vm_page		*pg;
	int			 i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Zero single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			uvm_pagezero(pg);
			atomic_setbits_int(&pg->pg_flags, PG_ZERO);
			uvmexp.zeropages++;
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Zero multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_DIRTY]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++) {
				uvm_pagezero(&pg[i]);
				atomic_setbits_int(&pg[i].pg_flags, PG_ZERO);
				uvmexp.zeropages++;
			}
			uvm_pmr_insert(pmr, pg, 0);
		}
	}
	uvm_unlock_fpageq();
}

/*
 * Mark all memory as dirty.
 *
 * Used to inform the system that the clean memory isn't clean for some
 * reason, for example because we just came back from hibernate.
 */
void
uvm_pmr_dirty_everything(void)
{
	struct uvm_pmemrange	*pmr;
	struct vm_page		*pg;
	int			 i;

	uvm_lock_fpageq();
	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		/* Dirty single pages. */
		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			uvm_pmr_remove(pmr, pg);
			atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}

		/* Dirty multi page ranges. */
		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
		    != NULL) {
			pg--; /* Size tree always has second page. */
			uvm_pmr_remove(pmr, pg);
			for (i = 0; i < pg->fpgsz; i++)
				atomic_clearbits_int(&pg[i].pg_flags, PG_ZERO);
			uvm_pmr_insert(pmr, pg, 0);
		}
	}

	uvmexp.zeropages = 0;
	uvm_unlock_fpageq();
}

/*
 * Allocate a range at the highest address that can hold sz.
 *
 * sz in bytes.
 */
int
uvm_pmr_alloc_pig(paddr_t *addr, psize_t sz)
{
	struct uvm_pmemrange	*pmr;
	struct vm_page		*pig_pg, *pg;

	/*
	 * Convert sz to pages, since that is what pmemrange uses internally.
	 */
	sz = atop(round_page(sz));

	uvm_lock_fpageq();

	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
		RB_FOREACH_REVERSE(pig_pg, uvm_pmr_addr, &pmr->addr) {
			if (pig_pg->fpgsz >= sz) {
				goto found;
			}
		}
	}

	/*
	 * Allocation failure.
	 */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/* Remove page from freelist. */
	uvm_pmr_remove_size(pmr, pig_pg);
	pig_pg->fpgsz -= sz;
	pg = pig_pg + pig_pg->fpgsz;
	if (pig_pg->fpgsz == 0)
		uvm_pmr_remove_addr(pmr, pig_pg);
	else
		uvm_pmr_insert_size(pmr, pig_pg);

	uvmexp.free -= sz;
	*addr = VM_PAGE_TO_PHYS(pg);

	/*
	 * Update pg flags.
	 *
	 * Note that we trash the sz argument now.
	 */
	while (sz > 0) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages--;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;

		/*
		 * Next.
		 */
		pg++;
		sz--;
	}

	/* Return. */
	uvm_unlock_fpageq();
	return 0;
}

/*
 * Allocate a piglet area.
 *
 * The piglet is allocated as low in physical memory as possible.
 * Piglets are aligned.
 *
 * sz and align in bytes.
 *
 * The call may sleep, waiting for the pagedaemon to attempt to free memory.
 * The pagedaemon may decide it's not possible to free enough memory, causing
 * the allocation to fail.
 */
int
uvm_pmr_alloc_piglet(vaddr_t *va, paddr_t *pa, vsize_t sz, paddr_t align)
{
	paddr_t			 pg_addr, piglet_addr;
	struct uvm_pmemrange	*pmr;
	struct vm_page		*pig_pg, *pg;
	struct pglist		 pageq;
	int			 pdaemon_woken;
	vaddr_t			 piglet_va;

	KASSERT((align & (align - 1)) == 0);
	pdaemon_woken = 0; /* Didn't wake the pagedaemon. */

	/*
	 * Fixup arguments: align must be at least PAGE_SIZE,
	 * sz will be converted to pagecount, since that is what
	 * pmemrange uses internally.
	 */
	if (align < PAGE_SIZE)
		align = PAGE_SIZE;
	sz = round_page(sz);

	uvm_lock_fpageq();

	TAILQ_FOREACH_REVERSE(pmr, &uvm.pmr_control.use, uvm_pmemrange_use,
	    pmr_use) {
retry:
		/*
		 * Search for a range with enough space.
		 * Use the address tree, to ensure the range is as low as
		 * possible.
		 */
		RB_FOREACH(pig_pg, uvm_pmr_addr, &pmr->addr) {
			pg_addr = VM_PAGE_TO_PHYS(pig_pg);
			piglet_addr = (pg_addr + (align - 1)) & ~(align - 1);

			if (atop(pg_addr) + pig_pg->fpgsz >=
			    atop(piglet_addr) + atop(sz))
				goto found;
		}
	}

	/*
	 * Try to coerce the pagedaemon into freeing memory
	 * for the piglet.
	 *
	 * pdaemon_woken is set to prevent the code from
	 * falling into an endless loop.
	 */
	if (!pdaemon_woken) {
		pdaemon_woken = 1;
		if (uvm_wait_pla(ptoa(pmr->low), ptoa(pmr->high) - 1,
		    sz, UVM_PLA_FAILOK) == 0)
			goto retry;
	}

	/* Return failure. */
	uvm_unlock_fpageq();
	return ENOMEM;

found:
	/*
	 * Extract piglet from pigpen.
	 */
	TAILQ_INIT(&pageq);
	uvm_pmr_extract_range(pmr, pig_pg,
	    atop(piglet_addr), atop(piglet_addr) + atop(sz), &pageq);

	*pa = piglet_addr;
	uvmexp.free -= atop(sz);

	/*
	 * Update pg flags.
	 *
	 * Note that we trash the sz argument now.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		KASSERT(pg->pg_flags & PQ_FREE);

		atomic_clearbits_int(&pg->pg_flags,
		    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

		if (pg->pg_flags & PG_ZERO)
			uvmexp.zeropages--;
		atomic_clearbits_int(&pg->pg_flags,
		    PG_ZERO|PQ_FREE);

		pg->uobject = NULL;
		pg->uanon = NULL;
		pg->pg_version++;
	}

	uvm_unlock_fpageq();

	/*
	 * Now allocate a va.
	 * Use direct mappings for the pages.
	 */

	piglet_va = *va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, &kd_waitok);
	if (!piglet_va) {
		uvm_pglistfree(&pageq);
		return ENOMEM;
	}

	/*
	 * Map piglet to va.
	 */
	TAILQ_FOREACH(pg, &pageq, pageq) {
		pmap_kenter_pa(piglet_va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
		piglet_va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Free a piglet area.
 */
void
uvm_pmr_free_piglet(vaddr_t va, vsize_t sz)
{
	paddr_t			 pa;
	struct vm_page		*pg;

	/*
	 * Fix parameters.
	 */
	sz = round_page(sz);

	/*
	 * Find the first page in piglet.
	 * Since piglets are contiguous, the first pg is all we need.
	 */
	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("uvm_pmr_free_piglet: piglet 0x%lx has no pages", va);
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		panic("uvm_pmr_free_piglet: unmanaged page 0x%lx", pa);

	/*
	 * Unmap.
	 */
	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());

	/*
	 * Free the physical and virtual memory.
	 */
	uvm_pmr_freepages(pg, atop(sz));
	km_free((void *)va, sz, &kv_any, &kp_none);
}

/*
 * Physmem RLE compression support.
 *
 * Given a physical page address, it will return the number of pages
 * starting at the address, that are free.
 * Returns 0 if the page at addr is not free.
 */
psize_t
uvm_page_rle(paddr_t addr)
{
	struct vm_page		*pg, *pg_end;
	struct vm_physseg	*vmp;
	int			 pseg_idx, off_idx;

	pseg_idx = vm_physseg_find(atop(addr), &off_idx);
	if (pseg_idx == -1)
		return 0;

	vmp = &vm_physmem[pseg_idx];
	pg = &vmp->pgs[off_idx];
	if (!(pg->pg_flags & PQ_FREE))
		return 0;

	/*
	 * Search for the first non-free page after pg.
	 * Note that the page may not be the first page in a free pmemrange,
	 * therefore pg->fpgsz cannot be used.
	 */
	for (pg_end = pg; pg_end <= vmp->lastpg &&
	    (pg_end->pg_flags & PQ_FREE) == PQ_FREE; pg_end++);
	return pg_end - pg;
}
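
/*
 * Example (editorial sketch): a caller can walk physical memory and
 * run-length encode the free ranges using uvm_page_rle. "start" and
 * "end" below are hypothetical range bounds.
 *
 *	paddr_t addr;
 *	psize_t npgs;
 *
 *	for (addr = start; addr < end; ) {
 *		npgs = uvm_page_rle(addr);
 *		if (npgs == 0)
 *			addr += PAGE_SIZE;	// page in use, skip it
 *		else
 *			addr += ptoa(npgs);	// skip the whole free run
 *	}
 */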

/*
 * Fills out the hibernate_info union pointed to by hiber_info
 * with information about this machine (swap signature block
 * offsets, number of memory ranges, kernel in use, etc.)
 */
int
get_hibernate_info(union hibernate_info *hiber_info, int suspend)
{
	int chunktable_size;
	struct disklabel dl;
	char err_string[128], *dl_ret;

	/* Determine I/O function to use */
	hiber_info->io_func = get_hibernate_io_function();
	if (hiber_info->io_func == NULL)
		return (1);

	/* Calculate hibernate device */
	hiber_info->device = swdevt[0].sw_dev;

	/* Read disklabel (used to calculate signature and image offsets) */
	dl_ret = disk_readlabel(&dl, hiber_info->device, err_string, 128);

	if (dl_ret) {
		printf("Hibernate error reading disklabel: %s\n", dl_ret);
		return (1);
	}

	hiber_info->secsize = dl.d_secsize;

	/* Make sure the signature can fit in one block */
	KASSERT(sizeof(union hibernate_info)/hiber_info->secsize == 1);

	/* Calculate swap offset from start of disk */
	hiber_info->swap_offset = dl.d_partitions[1].p_offset;

	/* Calculate signature block location */
	hiber_info->sig_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    sizeof(union hibernate_info)/hiber_info->secsize;

	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	/* Stash kernel version information */
	bzero(&hiber_info->kernel_version, 128);
	bcopy(version, &hiber_info->kernel_version,
	    min(strlen(version), sizeof(hiber_info->kernel_version)-1));

	if (suspend) {
		/* Allocate piglet region */
		if (uvm_pmr_alloc_piglet(&hiber_info->piglet_va,
		    &hiber_info->piglet_pa, HIBERNATE_CHUNK_SIZE*3,
		    HIBERNATE_CHUNK_SIZE)) {
			printf("Hibernate failed to allocate the piglet\n");
			return (1);
		}
		hiber_info->io_page = (void *)hiber_info->piglet_va;
	} else {
		/*
		 * Resuming kernels use a regular I/O page since we won't
		 * have access to the suspended kernel's piglet VA at this
		 * point. No need to free this I/O page as it will vanish
		 * as part of the resume.
		 */
		hiber_info->io_page = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
		if (!hiber_info->io_page)
			return (1);
	}

	/*
	 * operation -1 (HIB_INIT) requests initialization of the hibernate
	 * IO function
	 */
	if (hiber_info->io_func(hiber_info->device, 0,
	    (vaddr_t)NULL, 0, HIB_INIT, hiber_info->io_page) == -1)
		goto fail;

	if (get_hibernate_info_md(hiber_info))
		goto fail;

	/* Calculate memory image location */
	hiber_info->image_offset = dl.d_partitions[1].p_offset +
	    dl.d_partitions[1].p_size -
	    (hiber_info->image_size / hiber_info->secsize) -
	    sizeof(union hibernate_info)/hiber_info->secsize -
	    chunktable_size;

	return (0);
fail:
	/* The piglet is only allocated on the suspend path. */
	if (suspend)
		uvm_pmr_free_piglet(hiber_info->piglet_va,
		    HIBERNATE_CHUNK_SIZE*3);
	return (1);
}
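
/*
 * Editorial sketch of the swap partition layout implied by the offset
 * math above (units: disk blocks; d_partitions[1] is the 'b' swap
 * partition):
 *
 *	+-- ... ------+------------------+-------------+-----------+
 *	| swap        | compressed image | chunk table | signature |
 *	+-- ... ------+------------------+-------------+-----------+
 *	^ swap_offset ^ image_offset                   ^ sig_offset
 *
 * The signature occupies the last block of the partition, the chunk
 * table the HIBERNATE_CHUNK_TABLE_SIZE bytes before it, and the
 * compressed image sits immediately below the chunk table.
 */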

/*
 * Allocate nitems*size bytes from the hiballoc area presently in use
 */
void *
hibernate_zlib_alloc(void *unused, int nitems, int size)
{
	return hib_alloc(&hibernate_state->hiballoc_arena, nitems*size);
}

/*
 * Free the memory pointed to by addr in the hiballoc area presently in
 * use
 */
void
hibernate_zlib_free(void *unused, void *addr)
{
	hib_free(&hibernate_state->hiballoc_arena, addr);
}

/*
 * Inflate size bytes from src into dest, skipping any pages in
 * [src..dest] that are special (see hibernate_inflate_skip)
 *
 * For each page of output data, we map HIBERNATE_TEMP_PAGE
 * to the current output page, and tell inflate() to inflate
 * its data there, resulting in the inflated data being placed
 * at the proper paddr.
 *
 * This function executes while using the resume-time stack
 * and pmap, and therefore cannot use ddb/printf/etc. Doing so
 * will likely hang or reset the machine.
 */
void
hibernate_inflate(union hibernate_info *hiber_info, paddr_t dest,
    paddr_t src, size_t size)
{
	int i;

	hibernate_state->hib_stream.avail_in = size;
	hibernate_state->hib_stream.next_in = (char *)src;

	do {
		/* Flush cache and TLB */
		hibernate_flush();

		/*
		 * Is this a special page? If yes, redirect the
		 * inflate output to a scratch page (eg, discard it)
		 */
		if (hibernate_inflate_skip(hiber_info, dest))
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE,
			    HIBERNATE_INFLATE_PAGE, 0);
		else
			hibernate_enter_resume_mapping(
			    HIBERNATE_INFLATE_PAGE, dest, 0);

		/* Set up the stream for inflate */
		hibernate_state->hib_stream.avail_out = PAGE_SIZE;
		hibernate_state->hib_stream.next_out =
		    (char *)HIBERNATE_INFLATE_PAGE;

		/* Process next block of data */
		i = inflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH);
		if (i != Z_OK && i != Z_STREAM_END) {
			/*
			 * XXX - this will likely reboot/hang most machines,
			 *       but there's not much else we can do here.
			 */
			panic("inflate error");
		}

		dest += PAGE_SIZE - hibernate_state->hib_stream.avail_out;
	} while (i != Z_STREAM_END);
}

/*
 * Deflate from src into the I/O page, up to 'remaining' bytes.
 *
 * Returns number of input bytes consumed, and may reset
 * the 'remaining' parameter if not all the output space was consumed
 * (this information is needed to know how much to write to disk).
 */
size_t
hibernate_deflate(union hibernate_info *hiber_info, paddr_t src,
    size_t *remaining)
{
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;

	/* Set up the stream for deflate */
	hibernate_state->hib_stream.avail_in = PAGE_SIZE - (src & PAGE_MASK);
	hibernate_state->hib_stream.avail_out = *remaining;
	hibernate_state->hib_stream.next_in = (caddr_t)src;
	hibernate_state->hib_stream.next_out = (caddr_t)hibernate_io_page +
	    (PAGE_SIZE - *remaining);

	/* Process next block of data */
	if (deflate(&hibernate_state->hib_stream, Z_PARTIAL_FLUSH) != Z_OK)
		panic("hibernate zlib deflate error");

	/* Update pointers and return number of bytes consumed */
	*remaining = hibernate_state->hib_stream.avail_out;
	return (PAGE_SIZE - (src & PAGE_MASK)) -
	    hibernate_state->hib_stream.avail_in;
}

/*
 * Write the hibernation information specified in hiber_info
 * to the location in swap previously calculated (last block of
 * swap), called the "signature block".
 */
int
hibernate_write_signature(union hibernate_info *hiber_info)
{
	/* Write hibernate info to disk */
	return (hiber_info->io_func(hiber_info->device, hiber_info->sig_offset,
	    (vaddr_t)hiber_info, hiber_info->secsize, HIB_W,
	    hiber_info->io_page));
}

/*
 * Write the memory chunk table to the area in swap immediately
 * preceding the signature block. The chunk table is stored
 * in the piglet when this function is called.
 */
int
hibernate_write_chunktable(union hibernate_info *hiber_info)
{
	vaddr_t hibernate_chunk_table_start;
	size_t hibernate_chunk_table_size;
	daddr_t chunkbase;
	int i;

	hibernate_chunk_table_size = HIBERNATE_CHUNK_TABLE_SIZE;

	chunkbase = hiber_info->sig_offset -
	    (hibernate_chunk_table_size / hiber_info->secsize);

	hibernate_chunk_table_start = hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE;

	/* Write chunk table */
	for (i = 0; i < hibernate_chunk_table_size; i += MAXPHYS) {
		if (hiber_info->io_func(hiber_info->device,
		    chunkbase + (i/hiber_info->secsize),
		    (vaddr_t)(hibernate_chunk_table_start + i),
		    MAXPHYS, HIB_W, hiber_info->io_page))
			return (1);
	}

	return (0);
}

/*
 * Write an empty hiber_info to the swap signature block, which is
 * guaranteed to not match any valid hiber_info.
 */
int
hibernate_clear_signature(void)
{
	union hibernate_info blank_hiber_info;
	union hibernate_info hiber_info;

	/* Zero out a blank hiber_info */
	bzero(&blank_hiber_info, sizeof(blank_hiber_info));

	if (get_hibernate_info(&hiber_info, 0))
		return (1);

	/* Write (zeroed) hibernate info to disk */
	/* XXX - use regular kernel write routine for this */
	if (hiber_info.io_func(hiber_info.device, hiber_info.sig_offset,
	    (vaddr_t)&blank_hiber_info, hiber_info.secsize, HIB_W,
	    hiber_info.io_page))
		panic("error hibernate write 6");

	return (0);
}

/*
 * Check chunk range overlap when calculating whether or not to copy a
 * compressed chunk to the piglet area before decompressing.
 *
 * Returns zero if the ranges do not overlap, non-zero otherwise.
 */
int
hibernate_check_overlap(paddr_t r1s, paddr_t r1e, paddr_t r2s, paddr_t r2e)
{
	/* case A : end of r1 overlaps start of r2 */
	if (r1s < r2s && r1e > r2s)
		return (1);

	/* case B : r1 entirely inside r2 */
	if (r1s >= r2s && r1e <= r2e)
		return (1);

	/* case C : r2 entirely inside r1 */
	if (r2s >= r1s && r2e <= r1e)
		return (1);

	/* case D : end of r2 overlaps start of r1 */
	if (r2s < r1s && r2e > r1s)
		return (1);

	return (0);
}
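
/*
 * Worked example (editorial): r1 = [2,5) and r2 = [4,8) hit case A
 * (r1s < r2s and r1e > r2s). For non-empty ranges, the four cases
 * above reduce to the single test (r1s < r2e && r2s < r1e).
 */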

/*
 * Compare two hibernate_infos to determine if they are the same (e.g.,
 * we should be performing a hibernate resume on this machine).
 * Not all fields are checked - just enough to verify that the machine
 * has the same memory configuration and kernel as the one that
 * wrote the signature previously.
 */
int
hibernate_compare_signature(union hibernate_info *mine,
    union hibernate_info *disk)
{
	u_int i;

	if (mine->nranges != disk->nranges)
		return (1);

	if (strcmp(mine->kernel_version, disk->kernel_version) != 0)
		return (1);

	for (i = 0; i < mine->nranges; i++) {
		if ((mine->ranges[i].base != disk->ranges[i].base) ||
		    (mine->ranges[i].end != disk->ranges[i].end))
			return (1);
	}

	return (0);
}

/*
 * Reads read_size bytes from the hibernate device specified in
 * hib_info at offset blkctr. Output is placed into the vaddr specified
 * at dest.
 *
 * Separate offsets and pages are used to handle misaligned reads (reads
 * that span a page boundary).
 *
 * blkctr specifies a relative offset (relative to the start of swap),
 * not an absolute disk offset.
 */
int
hibernate_read_block(union hibernate_info *hib_info, daddr_t blkctr,
    size_t read_size, vaddr_t dest)
{
	struct buf *bp;
	struct bdevsw *bdsw;
	int error;

	bp = geteblk(read_size);
	bdsw = &bdevsw[major(hib_info->device)];

	error = (*bdsw->d_open)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_read_block open failed\n");
		bp->b_flags |= B_INVAL;
		brelse(bp);
		return (1);
	}

	bp->b_bcount = read_size;
	bp->b_blkno = blkctr;
	CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
	SET(bp->b_flags, B_BUSY | B_READ | B_RAW);
	bp->b_dev = hib_info->device;
	bp->b_cylinder = 0;
	(*bdsw->d_strategy)(bp);

	error = biowait(bp);
	if (error) {
		printf("hibernate_read_block biowait failed %d\n", error);
		error = (*bdsw->d_close)(hib_info->device, FREAD, S_IFCHR,
		    curproc);
		if (error)
			printf("hibernate_read_block error close failed\n");
		bp->b_flags |= B_INVAL;
		brelse(bp);
		return (1);
	}

	error = (*bdsw->d_close)(hib_info->device, FREAD, S_IFCHR, curproc);
	if (error) {
		printf("hibernate_read_block close failed\n");
		bp->b_flags |= B_INVAL;
		brelse(bp);
		return (1);
	}

	bcopy(bp->b_data, (caddr_t)dest, read_size);

	bp->b_flags |= B_INVAL;
	brelse(bp);

	return (0);
}

/*
 * Reads the signature block from swap, checks against the current machine's
 * information. If the information matches, perform a resume by reading the
 * saved image into the pig area, and unpacking.
 */
void
hibernate_resume(void)
{
	union hibernate_info hiber_info;
	int s;

	/* Scrub temporary vaddr ranges used during resume */
	hibernate_temp_page = (vaddr_t)NULL;
	hibernate_fchunk_area = (vaddr_t)NULL;
	hibernate_chunktable_area = (vaddr_t)NULL;
	hibernate_stack_page = (vaddr_t)NULL;

	/* Get current running machine's hibernate info */
	bzero(&hiber_info, sizeof(hiber_info));
	if (get_hibernate_info(&hiber_info, 0))
		return;

	/* Read hibernate info from disk */
	s = splbio();

	/* XXX use regular kernel read routine here */
	if (hiber_info.io_func(hiber_info.device, hiber_info.sig_offset,
	    (vaddr_t)&disk_hiber_info, hiber_info.secsize, HIB_R,
	    hiber_info.io_page))
		panic("error in hibernate read");

	/*
	 * The on-disk and in-memory hibernate signatures must match for
	 * a resume. hibernate_compare_signature returns non-zero when
	 * they differ, in which case there is nothing to resume from.
	 */
	if (hibernate_compare_signature(&hiber_info, &disk_hiber_info)) {
		splx(s);
		return;
	}

	/*
	 * Allocate several regions of vaddrs for use during read.
	 * These mappings go into the resuming kernel's page table, and are
	 * used only during image read.
	 */
	hibernate_temp_page = (vaddr_t)km_alloc(2*PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_temp_page)
		goto fail;

	hibernate_fchunk_area = (vaddr_t)km_alloc(3*PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_fchunk_area)
		goto fail;

	/* Allocate a temporary chunktable area */
	hibernate_chunktable_area = (vaddr_t)malloc(HIBERNATE_CHUNK_TABLE_SIZE,
	    M_DEVBUF, M_NOWAIT);
	if (!hibernate_chunktable_area)
		goto fail;

	/* Allocate one temporary page of VAs for the resume time stack */
	hibernate_stack_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_stack_page)
		goto fail;

	/* Read the image from disk into the image (pig) area */
	if (hibernate_read_image(&disk_hiber_info))
		goto fail;

	/* Point of no return ... */

	disable_intr();
	cold = 1;

	/* Switch stacks */
	hibernate_switch_stack_machdep();

	/*
	 * Image is now in high memory (pig area), copy to correct location
	 * in memory. We'll eventually end up copying on top of ourself, but
	 * we are assured the kernel code here is the same between the
	 * hibernated and resuming kernel, and we are running on our own
	 * stack, so the overwrite is ok.
	 */
	hibernate_unpack_image(&disk_hiber_info);

	/*
	 * Resume the loaded kernel by jumping to the MD resume vector.
	 * We won't be returning from this call.
	 */
	hibernate_resume_machdep();

fail:
	printf("Unable to resume hibernated image\n");

	if (hibernate_temp_page)
		km_free((void *)hibernate_temp_page, 2*PAGE_SIZE, &kv_any,
		    &kp_none);

	if (hibernate_fchunk_area)
		km_free((void *)hibernate_fchunk_area, 3*PAGE_SIZE, &kv_any,
		    &kp_none);

	if (hibernate_chunktable_area)
		free((void *)hibernate_chunktable_area, M_DEVBUF);

	splx(s);
}

/*
 * Unpack image from pig area to original location by looping through the
 * list of output chunks in the order they should be restored (fchunks).
 * This ordering is used to avoid having inflate overwrite a chunk in the
 * middle of processing that chunk. This will, of course, happen during the
 * final output chunk, where we copy the chunk to the piglet area first,
 * before inflating.
 */
void
hibernate_unpack_image(union hibernate_info *hiber_info)
{
	struct hibernate_disk_chunk *chunks;
	union hibernate_info local_hiber_info;
	paddr_t image_cur = global_pig_start;
	vaddr_t tempva;
	int *fchunks, i;
	char *pva = (char *)hiber_info->piglet_va;

	/* Mask off based on arch-specific piglet page size */
	pva = (char *)((paddr_t)pva & (PIGLET_PAGE_MASK));
	fchunks = (int *)(pva + (6 * PAGE_SIZE));

	/* Copy temporary chunktable to piglet */
	tempva = (vaddr_t)km_alloc(HIBERNATE_CHUNK_TABLE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	for (i = 0; i < HIBERNATE_CHUNK_TABLE_SIZE; i += PAGE_SIZE)
		pmap_kenter_pa(tempva + i, hiber_info->piglet_pa +
		    HIBERNATE_CHUNK_SIZE + i, VM_PROT_ALL);

	bcopy((caddr_t)hibernate_chunktable_area, (caddr_t)tempva,
	    HIBERNATE_CHUNK_TABLE_SIZE);

	chunks = (struct hibernate_disk_chunk *)(pva + HIBERNATE_CHUNK_SIZE);

	/* Can't use hiber_info that's passed in after here */
	bcopy(hiber_info, &local_hiber_info, sizeof(union hibernate_info));

	hibernate_activate_resume_pt_machdep();

	for (i = 0; i < local_hiber_info.chunk_ctr; i++) {
		/* Reset zlib for inflate */
		if (hibernate_zlib_reset(&local_hiber_info, 0) != Z_OK)
			panic("hibernate failed to reset zlib for inflate");

		/*
		 * If there is a conflict, copy the chunk to the piglet area
		 * before unpacking it to its original location.
		 */
		if ((chunks[fchunks[i]].flags & HIBERNATE_CHUNK_CONFLICT) == 0)
			hibernate_inflate(&local_hiber_info,
			    chunks[fchunks[i]].base, image_cur,
			    chunks[fchunks[i]].compressed_size);
		else {
			bcopy((caddr_t)image_cur,
			    pva + (HIBERNATE_CHUNK_SIZE * 2),
			    chunks[fchunks[i]].compressed_size);
			hibernate_inflate(&local_hiber_info,
			    chunks[fchunks[i]].base,
			    (vaddr_t)(pva + (HIBERNATE_CHUNK_SIZE * 2)),
			    chunks[fchunks[i]].compressed_size);
		}
		image_cur += chunks[fchunks[i]].compressed_size;
	}
}

/*
 * Write a compressed version of this machine's memory to disk, at the
 * precalculated swap offset:
 *
 * end of swap - signature block size - chunk table size - memory size
 *
 * The function begins by looping through each phys mem range, cutting each
 * one into 4MB chunks. These chunks are then compressed individually
 * and written out to disk, in phys mem order. Some chunks might compress
 * more than others, and for this reason, each chunk's size is recorded
 * in the chunk table, which is written to disk after the image has
 * properly been compressed and written (in hibernate_write_chunktable).
 *
 * When this function is called, the machine is nearly suspended - most
 * devices are quiesced/suspended, interrupts are off, and cold has
 * been set. This means that there can be no side effects once the
 * write has started, and the write function itself can also have no
 * side effects.
 *
 * This function uses the piglet area during this process as follows:
 *
 * offset from piglet base	use
 * -----------------------	--------------------
 * 0				i/o allocation area
 * PAGE_SIZE			i/o write area
 * 2*PAGE_SIZE			temp/scratch page
 * 3*PAGE_SIZE			temp/scratch page
 * 4*PAGE_SIZE to 6*PAGE_SIZE	(unused during write)
 * 7*PAGE_SIZE			zlib state and hiballoc arena
 * 8*PAGE_SIZE to 88*PAGE_SIZE	zlib deflate area
 * ...
 * HIBERNATE_CHUNK_SIZE		chunk table temporary area
 *
 * Some transient piglet content is saved as part of deflate,
 * but it is irrelevant during resume as it will be repurposed
 * at that time for other things.
 */
int
hibernate_write_chunks(union hibernate_info *hiber_info)
{
	paddr_t range_base, range_end, inaddr, temp_inaddr;
	size_t nblocks, out_remaining, used, offset = 0;
	struct hibernate_disk_chunk *chunks;
	vaddr_t hibernate_io_page = hiber_info->piglet_va + PAGE_SIZE;
	daddr_t blkctr = hiber_info->image_offset;
	int i;

	hiber_info->chunk_ctr = 0;

	/*
	 * Allocate VA for the temp and copy page.
	 */

	hibernate_temp_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_temp_page)
		return (1);

	hibernate_copy_page = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any,
	    &kp_none, &kd_nowait);
	if (!hibernate_copy_page)
		return (1);

	pmap_kenter_pa(hibernate_copy_page,
	    (hiber_info->piglet_pa + 3*PAGE_SIZE), VM_PROT_ALL);

	/* XXX - not needed on all archs */
	pmap_activate(curproc);

	chunks = (struct hibernate_disk_chunk *)(hiber_info->piglet_va +
	    HIBERNATE_CHUNK_SIZE);

	/* Calculate the chunk regions */
	for (i = 0; i < hiber_info->nranges; i++) {
		range_base = hiber_info->ranges[i].base;
		range_end = hiber_info->ranges[i].end;

		inaddr = range_base;

		while (inaddr < range_end) {
			chunks[hiber_info->chunk_ctr].base = inaddr;
			if (inaddr + HIBERNATE_CHUNK_SIZE < range_end)
				chunks[hiber_info->chunk_ctr].end = inaddr +
				    HIBERNATE_CHUNK_SIZE;
			else
				chunks[hiber_info->chunk_ctr].end = range_end;

			inaddr += HIBERNATE_CHUNK_SIZE;
			hiber_info->chunk_ctr++;
		}
	}

	/* Compress and write the chunks in the chunktable */
	for (i = 0; i < hiber_info->chunk_ctr; i++) {
		range_base = chunks[i].base;
		range_end = chunks[i].end;

		chunks[i].offset = blkctr;

		/* Reset zlib for deflate */
		if (hibernate_zlib_reset(hiber_info, 1) != Z_OK)
			return (1);

		inaddr = range_base;

		/*
		 * For each range, loop through its phys mem region
		 * and write out the chunks (the last chunk might be
		 * smaller than the chunk size).
		 */
		while (inaddr < range_end) {
			out_remaining = PAGE_SIZE;
			while (out_remaining > 0 && inaddr < range_end) {
				pmap_kenter_pa(hibernate_temp_page,
				    inaddr & PMAP_PA_MASK, VM_PROT_ALL);

				/* XXX - not needed on all archs */
				pmap_activate(curproc);

				bcopy((caddr_t)hibernate_temp_page,
				    (caddr_t)hibernate_copy_page, PAGE_SIZE);

				/*
				 * Adjust for regions that are not evenly
				 * divisible by PAGE_SIZE
				 */
				temp_inaddr = (inaddr & PAGE_MASK) +
				    hibernate_copy_page;

				/* Deflate from temp_inaddr to IO page */
				inaddr += hibernate_deflate(hiber_info,
				    temp_inaddr, &out_remaining);
			}

			if (out_remaining == 0) {
				/* Filled up the page */
				nblocks = PAGE_SIZE / hiber_info->secsize;

				if (hiber_info->io_func(hiber_info->device,
				    blkctr, (vaddr_t)hibernate_io_page,
				    PAGE_SIZE, HIB_W, hiber_info->io_page))
					return (1);

				blkctr += nblocks;
			}
		}

		if (inaddr != range_end)
			return (1);

		/*
		 * End of range. Round up to next secsize bytes
		 * after finishing compress
		 */
		if (out_remaining == 0)
			out_remaining = PAGE_SIZE;

		/* Finish compress */
		hibernate_state->hib_stream.avail_in = 0;
		hibernate_state->hib_stream.avail_out = out_remaining;
		hibernate_state->hib_stream.next_in = (caddr_t)inaddr;
		hibernate_state->hib_stream.next_out =
		    (caddr_t)hibernate_io_page + (PAGE_SIZE - out_remaining);

		if (deflate(&hibernate_state->hib_stream, Z_FINISH) !=
		    Z_STREAM_END)
			return (1);

		out_remaining = hibernate_state->hib_stream.avail_out;

		used = PAGE_SIZE - out_remaining;
		nblocks = used / hiber_info->secsize;

		/* Round up to next block if needed */
		if (used % hiber_info->secsize != 0)
			nblocks++;

		/* Write final block(s) for this chunk */
		if (hiber_info->io_func(hiber_info->device, blkctr,
		    (vaddr_t)hibernate_io_page, nblocks*hiber_info->secsize,
		    HIB_W, hiber_info->io_page))
			return (1);

		blkctr += nblocks;

		offset = blkctr;
		chunks[i].compressed_size = (offset - chunks[i].offset) *
		    hiber_info->secsize;
	}

	return (0);
}

/*
 * Reset the zlib stream state and allocate a new hiballoc area for either
 * inflate or deflate. This function is called once for each hibernate chunk.
 * Calling hiballoc_init multiple times is acceptable since the memory it is
 * provided is unmanaged memory (stolen). We use the memory provided to us
 * by the piglet allocated via the supplied hiber_info.
 */
int
hibernate_zlib_reset(union hibernate_info *hiber_info, int deflate)
{
	vaddr_t hibernate_zlib_start;
	size_t hibernate_zlib_size;
	char *pva = (char *)hiber_info->piglet_va;

	hibernate_state = (struct hibernate_zlib_state *)
	    (pva + (7 * PAGE_SIZE));

	hibernate_zlib_start = (vaddr_t)(pva + (8 * PAGE_SIZE));
	hibernate_zlib_size = 80 * PAGE_SIZE;

	bzero((caddr_t)hibernate_zlib_start, hibernate_zlib_size);
	bzero((caddr_t)hibernate_state, PAGE_SIZE);

	/* Set up stream structure */
	hibernate_state->hib_stream.zalloc = (alloc_func)hibernate_zlib_alloc;
	hibernate_state->hib_stream.zfree = (free_func)hibernate_zlib_free;

	/* Initialize the hiballoc arena for zlib allocs/frees */
	hiballoc_init(&hibernate_state->hiballoc_arena,
	    (caddr_t)hibernate_zlib_start, hibernate_zlib_size);

	if (deflate) {
		return deflateInit(&hibernate_state->hib_stream,
		    Z_DEFAULT_COMPRESSION);
	} else
		return inflateInit(&hibernate_state->hib_stream);
}

/*
 * Reads the hibernated memory image from disk, whose location and
 * size are recorded in hiber_info. Begin by reading the persisted
 * chunk table, which records the original chunk placement location
 * and compressed size for each. Next, allocate a pig region of
 * sufficient size to hold the compressed image. Next, read the
 * chunks into the pig area (calling hibernate_read_chunks to do this),
 * and finally, if all of the above succeeds, clear the hibernate signature.
 * The function will then return to hibernate_resume, which will proceed
 * to unpack the pig image to the correct place in memory.
 */
int
hibernate_read_image(union hibernate_info *hiber_info)
{
	size_t compressed_size, disk_size, chunktable_size, pig_sz;
	paddr_t image_start, image_end, pig_start, pig_end;
	struct hibernate_disk_chunk *chunks;
	daddr_t blkctr;
	int i;

	/* Calculate total chunk table size in disk blocks */
	chunktable_size = HIBERNATE_CHUNK_TABLE_SIZE / hiber_info->secsize;

	blkctr = hiber_info->sig_offset - chunktable_size -
	    hiber_info->swap_offset;

	for (i = 0; i < HIBERNATE_CHUNK_TABLE_SIZE;
	    i += MAXPHYS, blkctr += MAXPHYS/hiber_info->secsize)
		hibernate_read_block(hiber_info, blkctr, MAXPHYS,
		    hibernate_chunktable_area + i);

	blkctr = hiber_info->image_offset;
	compressed_size = 0;
	chunks = (struct hibernate_disk_chunk *)hibernate_chunktable_area;

	for (i = 0; i < hiber_info->chunk_ctr; i++)
		compressed_size += chunks[i].compressed_size;

	disk_size = compressed_size;

	/* Allocate the pig area */
	pig_sz = compressed_size + HIBERNATE_CHUNK_SIZE;
	if (uvm_pmr_alloc_pig(&pig_start, pig_sz) == ENOMEM)
		return (1);

	pig_end = pig_start + pig_sz;

	/* Calculate image extents. Pig image must end on a chunk boundary. */
	image_end = pig_end & ~(HIBERNATE_CHUNK_SIZE - 1);
	image_start = image_end - disk_size;

	hibernate_read_chunks(hiber_info, image_start, image_end, disk_size);

	/* Prepare the resume time pmap/page table */
	hibernate_populate_resume_pt(hiber_info, image_start, image_end);

	/* Read complete, clear the signature and return */
	return hibernate_clear_signature();
}

/*
 * Read the hibernated memory chunks from disk (chunk information at this
 * point is stored in the piglet) into the pig area specified by
 * [pig_start .. pig_end]. Order the chunks so that the final chunk is the
 * only chunk with overlap possibilities.
 *
 * This function uses the piglet area during this process as follows:
 *
 * offset from piglet base	use
 * -----------------------	--------------------
 * 0				i/o allocation area
 * PAGE_SIZE			i/o write area
 * 2*PAGE_SIZE			temp/scratch page
 * 3*PAGE_SIZE			temp/scratch page
 * 4*PAGE_SIZE to 6*PAGE_SIZE	chunk ordering area
 * 7*PAGE_SIZE			hiballoc arena
 * 8*PAGE_SIZE to 88*PAGE_SIZE	zlib deflate area
 * ...
 * HIBERNATE_CHUNK_SIZE		chunk table temporary area
 */
int
hibernate_read_chunks(union hibernate_info *hib_info, paddr_t pig_start,
    paddr_t pig_end, size_t image_compr_size)
{
	paddr_t img_index, img_cur, r1s, r1e, r2s, r2e;
	paddr_t copy_start, copy_end, piglet_cur;
	paddr_t piglet_base = hib_info->piglet_pa;
	paddr_t piglet_end = piglet_base + HIBERNATE_CHUNK_SIZE;
	daddr_t blkctr;
	size_t processed, compressed_size, read_size;
	int i, j, overlap, found, nchunks;
	int nochunks = 0, nfchunks = 0, npchunks = 0;
	struct hibernate_disk_chunk *chunks;
	int *ochunks, *pchunks, *fchunks;

	global_pig_start = pig_start;

	/* XXX - don't need this on all archs */
	pmap_activate(curproc);

	/* Temporary output chunk ordering */
	ochunks = (int *)hibernate_fchunk_area;

	/* Piglet chunk ordering */
	pchunks = (int *)(hibernate_fchunk_area + PAGE_SIZE);

	/* Final chunk ordering */
	fchunks = (int *)(hibernate_fchunk_area + (2*PAGE_SIZE));

	/* Map the chunk ordering region */
	pmap_kenter_pa(hibernate_fchunk_area,
	    piglet_base + (4*PAGE_SIZE), VM_PROT_ALL);
	pmap_kenter_pa((vaddr_t)pchunks, piglet_base + (5*PAGE_SIZE),
	    VM_PROT_ALL);
	pmap_kenter_pa((vaddr_t)fchunks, piglet_base + (6*PAGE_SIZE),
	    VM_PROT_ALL);

	nchunks = hib_info->chunk_ctr;
	chunks = (struct hibernate_disk_chunk *)hibernate_chunktable_area;

	/* Initially start all chunks as unplaced */
	for (i = 0; i < nchunks; i++)
		chunks[i].flags = 0;

	/*
	 * Search the list for chunks that are outside the pig area. These
	 * can be placed first in the final output list.
	 */
	for (i = 0; i < nchunks; i++) {
		if (chunks[i].end <= pig_start || chunks[i].base >= pig_end) {
			ochunks[nochunks] = i;
			fchunks[nfchunks] = i;
			nochunks++;
			nfchunks++;
			chunks[i].flags |= HIBERNATE_CHUNK_USED;
		}
	}

	/*
	 * Walk the ordering, place the chunks in ascending memory order.
	 * Conflicts might arise, these are handled next.
	 */
	do {
		img_index = (paddr_t)-1;
		found = 0;
		j = -1;
		for (i = 0; i < nchunks; i++)
			if (chunks[i].base < img_index &&
			    chunks[i].flags == 0) {
				j = i;
				img_index = chunks[i].base;
			}

		if (j != -1) {
			found = 1;
			ochunks[nochunks] = j;
			nochunks++;
			chunks[j].flags |= HIBERNATE_CHUNK_PLACED;
		}
	} while (found);

	img_index = pig_start;

	/*
	 * Identify chunk output conflicts (chunks whose pig load area
	 * corresponds to their original memory placement location)
	 */
	for (i = 0; i < nochunks; i++) {
		overlap = 0;
		r1s = img_index;
		r1e = img_index + chunks[ochunks[i]].compressed_size;
		r2s = chunks[ochunks[i]].base;
		r2e = chunks[ochunks[i]].end;

		overlap = hibernate_check_overlap(r1s, r1e, r2s, r2e);
		if (overlap)
			chunks[ochunks[i]].flags |= HIBERNATE_CHUNK_CONFLICT;
		img_index += chunks[ochunks[i]].compressed_size;
	}

	/*
	 * Prepare the final output chunk list. Calculate an output
	 * inflate strategy for overlapping chunks if needed.
	 */
	img_index = pig_start;
	for (i = 0; i < nochunks; i++) {
		/*
		 * If a conflict is detected, consume enough compressed
		 * output chunks to fill the piglet
		 */
		if (chunks[ochunks[i]].flags & HIBERNATE_CHUNK_CONFLICT) {
			copy_start = piglet_base;
			copy_end = piglet_end;
			piglet_cur = piglet_base;
			npchunks = 0;
			j = i;
			while (copy_start < copy_end && j < nochunks) {
				piglet_cur +=
				    chunks[ochunks[j]].compressed_size;
				pchunks[npchunks] = ochunks[j];
				npchunks++;
				copy_start +=
				    chunks[ochunks[j]].compressed_size;
				img_index +=
				    chunks[ochunks[j]].compressed_size;
				i++;
				j++;
			}

			piglet_cur = piglet_base;
			for (j = 0; j < npchunks; j++) {
				piglet_cur +=
				    chunks[pchunks[j]].compressed_size;
				fchunks[nfchunks] = pchunks[j];
				chunks[pchunks[j]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
		} else {
			/*
			 * No conflict, chunk can be added without copying
			 */
			if ((chunks[ochunks[i]].flags &
			    HIBERNATE_CHUNK_USED) == 0) {
				fchunks[nfchunks] = ochunks[i];
				chunks[ochunks[i]].flags |=
				    HIBERNATE_CHUNK_USED;
				nfchunks++;
			}
			img_index += chunks[ochunks[i]].compressed_size;
		}
	}

	img_index = pig_start;
	for (i = 0; i < nfchunks; i++) {
		piglet_cur = piglet_base;
		img_index += chunks[fchunks[i]].compressed_size;
	}

	img_cur = pig_start;

	for (i = 0; i < nfchunks; i++) {
		blkctr = chunks[fchunks[i]].offset - hib_info->swap_offset;
		processed = 0;
		compressed_size = chunks[fchunks[i]].compressed_size;

		while (processed < compressed_size) {
			pmap_kenter_pa(hibernate_temp_page, img_cur,
			    VM_PROT_ALL);
			pmap_kenter_pa(hibernate_temp_page + PAGE_SIZE,
			    img_cur+PAGE_SIZE, VM_PROT_ALL);

			/* XXX - not needed on all archs */
			pmap_activate(curproc);
			if (compressed_size - processed >= PAGE_SIZE)
				read_size = PAGE_SIZE;
			else
				read_size = compressed_size - processed;

			hibernate_read_block(hib_info, blkctr, read_size,
			    hibernate_temp_page + (img_cur & PAGE_MASK));

			blkctr += (read_size / hib_info->secsize);

			hibernate_flush();
			pmap_kremove(hibernate_temp_page, PAGE_SIZE);
			pmap_kremove(hibernate_temp_page + PAGE_SIZE,
			    PAGE_SIZE);
			processed += read_size;
			img_cur += read_size;
		}
	}

	return (0);
}

/*
 * Hibernating a machine comprises the following operations:
 *  1. Calculating this machine's hibernate_info information
 *  2. Allocating a piglet and saving the piglet's physaddr
 *  3. Calculating the memory chunks
 *  4. Writing the compressed chunks to disk
 *  5. Writing the chunk table
 *  6. Writing the signature block (hibernate_info)
 *
 * On most architectures, the function calling hibernate_suspend would
 * then power off the machine using some MD-specific implementation.
 */
int
hibernate_suspend(void)
{
	union hibernate_info hib_info;

	/*
	 * Calculate memory ranges, swap offsets, etc.
	 * This also allocates a piglet whose physaddr is stored in
	 * hib_info->piglet_pa and vaddr stored in hib_info->piglet_va
	 */
	if (get_hibernate_info(&hib_info, 1))
		return (1);

	global_piglet_va = hib_info.piglet_va;

	/* XXX - Won't need to zero everything with RLE */
	uvm_pmr_zero_everything();

	if (hibernate_write_chunks(&hib_info))
		return (1);

	if (hibernate_write_chunktable(&hib_info))
		return (1);

	if (hibernate_write_signature(&hib_info))
		return (1);

	delay(500000);
	return (0);
}

/*
 * Free items allocated during hibernate
 */
void
hibernate_free(void)
{
	uvm_pmr_free_piglet(global_piglet_va, 3*HIBERNATE_CHUNK_SIZE);

	pmap_kremove(hibernate_copy_page, PAGE_SIZE);
	pmap_kremove(hibernate_temp_page, PAGE_SIZE);
	pmap_update(pmap_kernel());

	km_free((void *)hibernate_fchunk_area, 3*PAGE_SIZE, &kv_any, &kp_none);
	km_free((void *)hibernate_copy_page, PAGE_SIZE, &kv_any, &kp_none);
	km_free((void *)hibernate_temp_page, PAGE_SIZE, &kv_any, &kp_none);
}