xref: /onnv-gate/usr/src/uts/common/cpr/cpr_dump.c (revision 3290:256464cbb73c)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*3290Sjohansen  * Common Development and Distribution License (the "License").
6*3290Sjohansen  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
22*3290Sjohansen  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate /*
290Sstevel@tonic-gate  * Fill in and write out the cpr state file
300Sstevel@tonic-gate  *	1. Allocate and write headers, ELF and cpr dump header
310Sstevel@tonic-gate  *	2. Allocate bitmaps according to phys_install
320Sstevel@tonic-gate  *	3. Tag kernel pages into corresponding bitmap
330Sstevel@tonic-gate  *	4. Write bitmaps to state file
340Sstevel@tonic-gate  *	5. Write actual physical page data to state file
350Sstevel@tonic-gate  */
360Sstevel@tonic-gate 
370Sstevel@tonic-gate #include <sys/types.h>
380Sstevel@tonic-gate #include <sys/systm.h>
390Sstevel@tonic-gate #include <sys/vm.h>
400Sstevel@tonic-gate #include <sys/memlist.h>
410Sstevel@tonic-gate #include <sys/kmem.h>
420Sstevel@tonic-gate #include <sys/vnode.h>
430Sstevel@tonic-gate #include <sys/fs/ufs_inode.h>
440Sstevel@tonic-gate #include <sys/errno.h>
450Sstevel@tonic-gate #include <sys/cmn_err.h>
460Sstevel@tonic-gate #include <sys/debug.h>
470Sstevel@tonic-gate #include <vm/page.h>
480Sstevel@tonic-gate #include <vm/seg.h>
490Sstevel@tonic-gate #include <vm/seg_kmem.h>
500Sstevel@tonic-gate #include <vm/seg_kpm.h>
510Sstevel@tonic-gate #include <vm/hat.h>
520Sstevel@tonic-gate #include <sys/cpr.h>
530Sstevel@tonic-gate #include <sys/conf.h>
540Sstevel@tonic-gate #include <sys/ddi.h>
550Sstevel@tonic-gate #include <sys/panic.h>
560Sstevel@tonic-gate #include <sys/thread.h>
570Sstevel@tonic-gate 
/* Local defines and variables */
#define	BTOb(bytes)	((bytes) << 3)		/* Bytes to bits, log2(NBBY) */
#define	bTOB(bits)	((bits) >> 3)		/* bits to Bytes, log2(NBBY) */

/* planned vs. actual page counts; cross-checked after the dump completes */
static uint_t cpr_pages_tobe_dumped;
static uint_t cpr_regular_pgs_dumped;

static int cpr_dump_regular_pages(vnode_t *);
static int cpr_count_upages(int, bitfunc_t);
static int cpr_compress_and_write(vnode_t *, uint_t, pfn_t, pgcnt_t);
int cpr_flush_write(vnode_t *);

int cpr_contig_pages(vnode_t *, int);

void cpr_clear_bitmaps();

extern size_t cpr_get_devsize(dev_t);
extern int i_cpr_dump_setup(vnode_t *);
extern int i_cpr_blockzero(char *, char **, int *, vnode_t *);
extern int cpr_test_mode;

/* terminator record written at the tail of the statefile */
ctrm_t cpr_term;

char *cpr_buf, *cpr_buf_end;	/* statefile write buffer [start, end) */
int cpr_buf_blocks;		/* size of cpr_buf in blocks */
size_t cpr_buf_size;		/* size of cpr_buf in bytes */
size_t cpr_bitmap_size;		/* bytes of bitmap; set by cpr_set_bitmap_size() */
int cpr_nbitmaps;		/* number of bitmap descriptors to write */

char *cpr_pagedata;		/* page buffer for compression / tmp copy */
size_t cpr_pagedata_size;	/* page buffer size in bytes */

static char *cpr_wptr;		/* keep track of where to write to next */
static int cpr_file_bn;		/* cpr state-file block offset */
static int cpr_disk_writes_ok;	/* set once sensitive kpages are saved aside */
static size_t cpr_dev_space = 0;

/* scratch area for merging bitmaps and for page copies */
char cpr_pagecopy[CPR_MAXCONTIG * MMU_PAGESIZE];
960Sstevel@tonic-gate 
970Sstevel@tonic-gate /*
980Sstevel@tonic-gate  * On some platforms bcopy may modify the thread structure
990Sstevel@tonic-gate  * during bcopy (eg, to prevent cpu migration).  If the
1000Sstevel@tonic-gate  * range we are currently writing out includes our own
1010Sstevel@tonic-gate  * thread structure then it will be snapshotted by bcopy
1020Sstevel@tonic-gate  * including those modified members - and the updates made
1030Sstevel@tonic-gate  * on exit from bcopy will no longer be seen when we later
1040Sstevel@tonic-gate  * restore the mid-bcopy kthread_t.  So if the range we
1050Sstevel@tonic-gate  * need to copy overlaps with our thread structure we will
1060Sstevel@tonic-gate  * use a simple byte copy.
1070Sstevel@tonic-gate  */
1080Sstevel@tonic-gate void
1090Sstevel@tonic-gate cprbcopy(void *from, void *to, size_t bytes)
1100Sstevel@tonic-gate {
1110Sstevel@tonic-gate 	extern int curthreadremapped;
1120Sstevel@tonic-gate 	caddr_t kthrend;
1130Sstevel@tonic-gate 
1140Sstevel@tonic-gate 	kthrend = (caddr_t)curthread + sizeof (kthread_t) - 1;
1150Sstevel@tonic-gate 	if (curthreadremapped || (kthrend >= (caddr_t)from &&
1160Sstevel@tonic-gate 	    kthrend < (caddr_t)from + bytes + sizeof (kthread_t) - 1)) {
1170Sstevel@tonic-gate 		caddr_t src = from, dst = to;
1180Sstevel@tonic-gate 
1190Sstevel@tonic-gate 		while (bytes-- > 0)
1200Sstevel@tonic-gate 			*dst++ = *src++;
1210Sstevel@tonic-gate 	} else {
1220Sstevel@tonic-gate 		bcopy(from, to, bytes);
1230Sstevel@tonic-gate 	}
1240Sstevel@tonic-gate }
1250Sstevel@tonic-gate 
1260Sstevel@tonic-gate /*
1270Sstevel@tonic-gate  * Allocate pages for buffers used in writing out the statefile
1280Sstevel@tonic-gate  */
1290Sstevel@tonic-gate static int
1300Sstevel@tonic-gate cpr_alloc_bufs(void)
1310Sstevel@tonic-gate {
1320Sstevel@tonic-gate 	char *allocerr = "Unable to allocate memory for cpr buffer";
1330Sstevel@tonic-gate 	size_t size;
1340Sstevel@tonic-gate 
1350Sstevel@tonic-gate 	/*
1360Sstevel@tonic-gate 	 * set the cpr write buffer size to at least the historic
1370Sstevel@tonic-gate 	 * size (128k) or large enough to store the both the early
1380Sstevel@tonic-gate 	 * set of statefile structures (well under 0x800) plus the
1390Sstevel@tonic-gate 	 * bitmaps, and roundup to the next pagesize.
1400Sstevel@tonic-gate 	 */
1410Sstevel@tonic-gate 	size = PAGE_ROUNDUP(dbtob(4) + cpr_bitmap_size);
1420Sstevel@tonic-gate 	cpr_buf_size = MAX(size, CPRBUFSZ);
1430Sstevel@tonic-gate 	cpr_buf_blocks = btodb(cpr_buf_size);
1440Sstevel@tonic-gate 	cpr_buf = kmem_alloc(cpr_buf_size, KM_NOSLEEP);
1450Sstevel@tonic-gate 	if (cpr_buf == NULL) {
1460Sstevel@tonic-gate 		cpr_err(CE_WARN, allocerr);
1470Sstevel@tonic-gate 		return (ENOMEM);
1480Sstevel@tonic-gate 	}
1490Sstevel@tonic-gate 	cpr_buf_end = cpr_buf + cpr_buf_size;
1500Sstevel@tonic-gate 
1510Sstevel@tonic-gate 	cpr_pagedata_size = mmu_ptob(CPR_MAXCONTIG + 1);
1520Sstevel@tonic-gate 	cpr_pagedata = kmem_alloc(cpr_pagedata_size, KM_NOSLEEP);
1530Sstevel@tonic-gate 	if (cpr_pagedata == NULL) {
1540Sstevel@tonic-gate 		kmem_free(cpr_buf, cpr_buf_size);
1550Sstevel@tonic-gate 		cpr_buf = NULL;
1560Sstevel@tonic-gate 		cpr_err(CE_WARN, allocerr);
1570Sstevel@tonic-gate 		return (ENOMEM);
1580Sstevel@tonic-gate 	}
1590Sstevel@tonic-gate 
1600Sstevel@tonic-gate 	return (0);
1610Sstevel@tonic-gate }
1620Sstevel@tonic-gate 
1630Sstevel@tonic-gate 
1640Sstevel@tonic-gate /*
1650Sstevel@tonic-gate  * Set bitmap size in bytes based on phys_install.
1660Sstevel@tonic-gate  */
1670Sstevel@tonic-gate void
1680Sstevel@tonic-gate cpr_set_bitmap_size(void)
1690Sstevel@tonic-gate {
1700Sstevel@tonic-gate 	struct memlist *pmem;
1710Sstevel@tonic-gate 	size_t size = 0;
1720Sstevel@tonic-gate 
1730Sstevel@tonic-gate 	memlist_read_lock();
1740Sstevel@tonic-gate 	for (pmem = phys_install; pmem; pmem = pmem->next)
1750Sstevel@tonic-gate 		size += pmem->size;
1760Sstevel@tonic-gate 	memlist_read_unlock();
1770Sstevel@tonic-gate 	cpr_bitmap_size = BITMAP_BYTES(size);
1780Sstevel@tonic-gate }
1790Sstevel@tonic-gate 
1800Sstevel@tonic-gate 
/*
 * CPR dump header contains the following information:
 *	1. header magic -- unique to cpr state file
 *	2. kernel return pc & ppn for resume
 *	3. current thread info
 *	4. debug level and test mode
 *	5. number of bitmaps allocated
 *	6. number of page records
 *
 * Returns 0 on success, ENOSPC when the estimated statefile is too
 * small (caller may retry allocation), or an error from cpr_write().
 */
static int
cpr_write_header(vnode_t *vp)
{
	extern ushort_t cpr_mach_type;
	struct cpr_dump_desc cdump;
	pgcnt_t bitmap_pages;
	pgcnt_t kpages, vpages, upages;

	cdump.cdd_magic = (uint_t)CPR_DUMP_MAGIC;
	cdump.cdd_version = CPR_VERSION;
	cdump.cdd_machine = cpr_mach_type;
	cdump.cdd_debug = cpr_debug;
	cdump.cdd_test_mode = cpr_test_mode;
	cdump.cdd_bitmaprec = cpr_nbitmaps;

	/* start from clean bitmaps before tagging pages below */
	cpr_clear_bitmaps();

	/*
	 * Remember how many pages we plan to save to statefile.
	 * This information will be used for sanity checks.
	 * Untag those pages that will not be saved to statefile.
	 */
	kpages = cpr_count_kpages(REGULAR_BITMAP, cpr_setbit);
	vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);
	upages = cpr_count_upages(REGULAR_BITMAP, cpr_setbit);
	cdump.cdd_dumppgsize = kpages - vpages + upages;
	cpr_pages_tobe_dumped = cdump.cdd_dumppgsize;
	DEBUG7(errp(
	    "\ncpr_write_header: kpages %ld - vpages %ld + upages %ld = %d\n",
	    kpages, vpages, upages, cdump.cdd_dumppgsize));

	/*
	 * Some pages contain volatile data (cpr_buf and storage area for
	 * sensitive kpages), which are no longer needed after the statefile
	 * is dumped to disk.  We have already untagged them from regular
	 * bitmaps.  Now tag them into the volatile bitmaps.  The pages in
	 * volatile bitmaps will be claimed during resume, and the resumed
	 * kernel will free them.
	 */
	(void) cpr_count_volatile_pages(VOLATILE_BITMAP, cpr_setbit);

	bitmap_pages = mmu_btopr(cpr_bitmap_size);

	/*
	 * Export accurate statefile size for statefile allocation retry.
	 * statefile_size = all the headers + total pages +
	 * number of pages used by the bitmaps.
	 * Roundup will be done in the file allocation code.
	 */
	STAT->cs_nocomp_statefsz = sizeof (cdd_t) + sizeof (cmd_t) +
		(sizeof (cbd_t) * cdump.cdd_bitmaprec) +
		(sizeof (cpd_t) * cdump.cdd_dumppgsize) +
		mmu_ptob(cdump.cdd_dumppgsize + bitmap_pages);

	/*
	 * If the estimated statefile is not big enough,
	 * go retry now to save un-necessary operations.
	 */
	if (!(CPR->c_flags & C_COMPRESSING) &&
		(STAT->cs_nocomp_statefsz > STAT->cs_est_statefsz)) {
		if (cpr_debug & (LEVEL1 | LEVEL7))
		    errp("cpr_write_header: STAT->cs_nocomp_statefsz > "
			"STAT->cs_est_statefsz\n");
		return (ENOSPC);
	}

	/* now write cpr dump descriptor */
	return (cpr_write(vp, (caddr_t)&cdump, sizeof (cdd_t)));
}
2590Sstevel@tonic-gate 
2600Sstevel@tonic-gate 
/*
 * CPR dump tail record contains the following information:
 *	1. header magic -- unique to cpr state file
 *	2. all misc info that needs to be passed to cprboot or resumed kernel
 *
 * Returns the result of writing the terminator via cpr_write().
 */
static int
cpr_write_terminator(vnode_t *vp)
{
	cpr_term.magic = (uint_t)CPR_TERM_MAGIC;
	/* record both the VA and the PFN of the terminator itself */
	cpr_term.va = (cpr_ptr)&cpr_term;
	cpr_term.pfn = (cpr_ext)va_to_pfn(&cpr_term);

	/* count the last one (flush) */
	cpr_term.real_statef_size = STAT->cs_real_statefsz +
		btod(cpr_wptr - cpr_buf) * DEV_BSIZE;

	DEBUG9(errp("cpr_dump: Real Statefile Size: %ld\n",
		STAT->cs_real_statefsz));

	/* stamp the terminator with the shutdown time-of-day */
	cpr_tod_get(&cpr_term.tm_shutdown);

	return (cpr_write(vp, (caddr_t)&cpr_term, sizeof (cpr_term)));
}
2840Sstevel@tonic-gate 
2850Sstevel@tonic-gate /*
2860Sstevel@tonic-gate  * Write bitmap descriptor array, followed by merged bitmaps.
2870Sstevel@tonic-gate  */
2880Sstevel@tonic-gate static int
2890Sstevel@tonic-gate cpr_write_bitmap(vnode_t *vp)
2900Sstevel@tonic-gate {
2910Sstevel@tonic-gate 	char *rmap, *vmap, *dst, *tail;
2920Sstevel@tonic-gate 	size_t size, bytes;
2930Sstevel@tonic-gate 	cbd_t *dp;
2940Sstevel@tonic-gate 	int err;
2950Sstevel@tonic-gate 
2960Sstevel@tonic-gate 	dp = CPR->c_bmda;
2970Sstevel@tonic-gate 	if (err = cpr_write(vp, (caddr_t)dp, cpr_nbitmaps * sizeof (*dp)))
2980Sstevel@tonic-gate 		return (err);
2990Sstevel@tonic-gate 
3000Sstevel@tonic-gate 	/*
3010Sstevel@tonic-gate 	 * merge regular and volatile bitmaps into tmp space
3020Sstevel@tonic-gate 	 * and write to disk
3030Sstevel@tonic-gate 	 */
3040Sstevel@tonic-gate 	for (; dp->cbd_size; dp++) {
3050Sstevel@tonic-gate 		rmap = (char *)dp->cbd_reg_bitmap;
3060Sstevel@tonic-gate 		vmap = (char *)dp->cbd_vlt_bitmap;
3070Sstevel@tonic-gate 		for (size = dp->cbd_size; size; size -= bytes) {
3080Sstevel@tonic-gate 			bytes = min(size, sizeof (cpr_pagecopy));
3090Sstevel@tonic-gate 			tail = &cpr_pagecopy[bytes];
3100Sstevel@tonic-gate 			for (dst = cpr_pagecopy; dst < tail; dst++)
3110Sstevel@tonic-gate 				*dst = *rmap++ | *vmap++;
3120Sstevel@tonic-gate 			if (err = cpr_write(vp, cpr_pagecopy, bytes))
3130Sstevel@tonic-gate 				break;
3140Sstevel@tonic-gate 		}
3150Sstevel@tonic-gate 	}
3160Sstevel@tonic-gate 
3170Sstevel@tonic-gate 	return (err);
3180Sstevel@tonic-gate }
3190Sstevel@tonic-gate 
3200Sstevel@tonic-gate 
/*
 * Write the tagged pages to the statefile: first the clean sensitive
 * kpages saved earlier, then all remaining regular pages.  Afterwards
 * cross-check that exactly the planned number of pages was dumped.
 * Returns 0 on success or the first error from a sub-step.
 */
static int
cpr_write_statefile(vnode_t *vp)
{
	uint_t error = 0;
	extern	int	i_cpr_check_pgs_dumped();
	void flush_windows(void);
	pgcnt_t spages;
	char *str;

	/* flush register windows so current stack state is in memory */
	flush_windows();

	/*
	 * to get an accurate view of kas, we need to untag sensitive
	 * pages *before* dumping them because the disk driver makes
	 * allocations and changes kas along the way.  The remaining
	 * pages referenced in the bitmaps are dumped out later as
	 * regular kpages.
	 */
	str = "cpr_write_statefile:";
	spages = i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_clrbit);
	DEBUG7(errp("%s untag %ld sens pages\n", str, spages));

	/*
	 * now it's OK to call a driver that makes allocations
	 */
	cpr_disk_writes_ok = 1;

	/*
	 * now write out the clean sensitive kpages
	 * according to the sensitive descriptors
	 */
	error = i_cpr_dump_sensitive_kpages(vp);
	if (error) {
		DEBUG7(errp("%s cpr_dump_sensitive_kpages() failed!\n", str));
		return (error);
	}

	/*
	 * cpr_dump_regular_pages() counts cpr_regular_pgs_dumped
	 */
	error = cpr_dump_regular_pages(vp);
	if (error) {
		DEBUG7(errp("%s cpr_dump_regular_pages() failed!\n", str));
		return (error);
	}

	/*
	 * sanity check to verify the right number of pages were dumped
	 */
	error = i_cpr_check_pgs_dumped(cpr_pages_tobe_dumped,
	    cpr_regular_pgs_dumped);

	if (error) {
		errp("\n%s page count mismatch!\n", str);
#ifdef DEBUG
		/* in test mode, stop in the debugger for analysis */
		if (cpr_test_mode)
			debug_enter(NULL);
#endif
	}

	return (error);
}
3830Sstevel@tonic-gate 
3840Sstevel@tonic-gate 
/*
 * creates the CPR state file, the following sections are
 * written out in sequence:
 *    - writes the cpr dump header
 *    - writes the memory usage bitmaps
 *    - writes the platform dependent info
 *    - writes the remaining user pages
 *    - writes the kernel pages
 *
 * Returns 0 on success, otherwise the first error encountered;
 * on error the caller is expected to handle retry/cleanup.
 */
int
cpr_dump(vnode_t *vp)
{
	int error;

	/* allocate write/page buffers once; they persist across retries */
	if (cpr_buf == NULL) {
		ASSERT(cpr_pagedata == NULL);
		if (error = cpr_alloc_bufs())
			return (error);
	}
	/* point to top of internal buffer */
	cpr_wptr = cpr_buf;

	/* initialize global variables used by the write operation */
	cpr_file_bn = cpr_statefile_offset();
	cpr_dev_space = 0;

	/* allocate bitmaps */
	if (CPR->c_bmda == NULL) {
		if (error = i_cpr_alloc_bitmaps()) {
			cpr_err(CE_WARN, "cannot allocate bitmaps");
			return (error);
		}
	}

	/* save prom pages before any statefile data is generated */
	if (error = i_cpr_prom_pages(CPR_PROM_SAVE))
		return (error);

	/* platform-dependent statefile setup */
	if (error = i_cpr_dump_setup(vp))
		return (error);

	/*
	 * set internal cross checking; we dont want to call
	 * a disk driver that makes allocations until after
	 * sensitive pages are saved
	 */
	cpr_disk_writes_ok = 0;

	/*
	 * 1253112: heap corruption due to memory allocation when dumpping
	 *	    statefile.
	 * Theoretically on Sun4u only the kernel data nucleus, kvalloc and
	 * kvseg segments can be contaminated should memory allocations happen
	 * during sddump, which is not supposed to happen after the system
	 * is quiesced. Let's call the kernel pages that tend to be affected
	 * 'sensitive kpages' here. To avoid saving inconsistent pages, we
	 * will allocate some storage space to save the clean sensitive pages
	 * aside before statefile dumping takes place. Since there may not be
	 * much memory left at this stage, the sensitive pages will be
	 * compressed before they are saved into the storage area.
	 */
	if (error = i_cpr_save_sensitive_kpages()) {
		DEBUG7(errp("cpr_dump: save_sensitive_kpages failed!\n"));
		return (error);
	}

	/*
	 * since all cpr allocations are done (space for sensitive kpages,
	 * bitmaps, cpr_buf), kas is stable, and now we can accurately
	 * count regular and sensitive kpages.
	 */
	if (error = cpr_write_header(vp)) {
		DEBUG7(errp("cpr_dump: cpr_write_header() failed!\n"));
		return (error);
	}

	/* machine-dependent info follows the dump header */
	if (error = i_cpr_write_machdep(vp))
		return (error);

	/* buffer-only blockzero pass (no file block/vnode supplied) */
	if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, NULL, NULL))
		return (error);

	/* bitmap descriptors plus merged bitmaps */
	if (error = cpr_write_bitmap(vp))
		return (error);

	/* sensitive kpages then all remaining regular pages */
	if (error = cpr_write_statefile(vp)) {
		DEBUG7(errp("cpr_dump: cpr_write_statefile() failed!\n"));
		return (error);
	}

	/* terminator record marks the end of the statefile */
	if (error = cpr_write_terminator(vp))
		return (error);

	/* push any data still buffered in cpr_buf out to the file */
	if (error = cpr_flush_write(vp))
		return (error);

	/* final blockzero pass, this time with file block offset and vnode */
	if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, &cpr_file_bn, vp))
		return (error);

	return (0);
}
4850Sstevel@tonic-gate 
4860Sstevel@tonic-gate 
4870Sstevel@tonic-gate /*
4885Seg155566  * cpr_xwalk() is called many 100x with a range within kvseg or kvseg_reloc;
4890Sstevel@tonic-gate  * a page-count from each range is accumulated at arg->pages.
4900Sstevel@tonic-gate  */
4910Sstevel@tonic-gate static void
4925Seg155566 cpr_xwalk(void *arg, void *base, size_t size)
4930Sstevel@tonic-gate {
4940Sstevel@tonic-gate 	struct cpr_walkinfo *cwip = arg;
4950Sstevel@tonic-gate 
4960Sstevel@tonic-gate 	cwip->pages += cpr_count_pages(base, size,
4970Sstevel@tonic-gate 	    cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE);
4980Sstevel@tonic-gate 	cwip->size += size;
4990Sstevel@tonic-gate 	cwip->ranges++;
5000Sstevel@tonic-gate }
5010Sstevel@tonic-gate 
5025Seg155566 /*
5035Seg155566  * cpr_walk() is called many 100x with a range within kvseg or kvseg_reloc;
5045Seg155566  * a page-count from each range is accumulated at arg->pages.
5055Seg155566  */
5065Seg155566 static void
5075Seg155566 cpr_walk(void *arg, void *base, size_t size)
5085Seg155566 {
5095Seg155566 	caddr_t addr = base;
5105Seg155566 	caddr_t addr_end = addr + size;
5115Seg155566 
5125Seg155566 	/*
5135Seg155566 	 * If we are about to start walking the range of addresses we
5145Seg155566 	 * carved out of the kernel heap for the large page heap walk
5155Seg155566 	 * heap_lp_arena to find what segments are actually populated
5165Seg155566 	 */
5175Seg155566 	if (SEGKMEM_USE_LARGEPAGES &&
5185Seg155566 	    addr == heap_lp_base && addr_end == heap_lp_end &&
5195Seg155566 	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
5205Seg155566 		vmem_walk(heap_lp_arena, VMEM_ALLOC, cpr_xwalk, arg);
5215Seg155566 	} else {
5225Seg155566 		cpr_xwalk(arg, base, size);
5235Seg155566 	}
5245Seg155566 }
5255Seg155566 
5260Sstevel@tonic-gate 
5270Sstevel@tonic-gate /*
5280Sstevel@tonic-gate  * faster scan of kvseg using vmem_walk() to visit
5290Sstevel@tonic-gate  * allocated ranges.
5300Sstevel@tonic-gate  */
5310Sstevel@tonic-gate pgcnt_t
5320Sstevel@tonic-gate cpr_scan_kvseg(int mapflag, bitfunc_t bitfunc, struct seg *seg)
5330Sstevel@tonic-gate {
5340Sstevel@tonic-gate 	struct cpr_walkinfo cwinfo;
5350Sstevel@tonic-gate 
5360Sstevel@tonic-gate 	bzero(&cwinfo, sizeof (cwinfo));
5370Sstevel@tonic-gate 	cwinfo.mapflag = mapflag;
5380Sstevel@tonic-gate 	cwinfo.bitfunc = bitfunc;
5390Sstevel@tonic-gate 
5400Sstevel@tonic-gate 	vmem_walk(heap_arena, VMEM_ALLOC, cpr_walk, &cwinfo);
5410Sstevel@tonic-gate 
5420Sstevel@tonic-gate 	if (cpr_debug & LEVEL7) {
5430Sstevel@tonic-gate 		errp("walked %d sub-ranges, total pages %ld\n",
5440Sstevel@tonic-gate 		    cwinfo.ranges, mmu_btop(cwinfo.size));
5450Sstevel@tonic-gate 		cpr_show_range(seg->s_base, seg->s_size,
5460Sstevel@tonic-gate 		    mapflag, bitfunc, cwinfo.pages);
5470Sstevel@tonic-gate 	}
5480Sstevel@tonic-gate 
5490Sstevel@tonic-gate 	return (cwinfo.pages);
5500Sstevel@tonic-gate }
5510Sstevel@tonic-gate 
5520Sstevel@tonic-gate 
5530Sstevel@tonic-gate /*
5540Sstevel@tonic-gate  * cpr_walk_kpm() is called for every used area within the large
5550Sstevel@tonic-gate  * segkpm virtual address window. A page-count is accumulated at
5560Sstevel@tonic-gate  * arg->pages.
5570Sstevel@tonic-gate  */
5580Sstevel@tonic-gate static void
5590Sstevel@tonic-gate cpr_walk_kpm(void *arg, void *base, size_t size)
5600Sstevel@tonic-gate {
5610Sstevel@tonic-gate 	struct cpr_walkinfo *cwip = arg;
5620Sstevel@tonic-gate 
5630Sstevel@tonic-gate 	cwip->pages += cpr_count_pages(base, size,
5640Sstevel@tonic-gate 	    cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE);
5650Sstevel@tonic-gate 	cwip->size += size;
5660Sstevel@tonic-gate 	cwip->ranges++;
5670Sstevel@tonic-gate }
5680Sstevel@tonic-gate 
5690Sstevel@tonic-gate 
5700Sstevel@tonic-gate /*
5710Sstevel@tonic-gate  * faster scan of segkpm using hat_kpm_walk() to visit only used ranges.
5720Sstevel@tonic-gate  */
5730Sstevel@tonic-gate /*ARGSUSED*/
5740Sstevel@tonic-gate static pgcnt_t
5750Sstevel@tonic-gate cpr_scan_segkpm(int mapflag, bitfunc_t bitfunc, struct seg *seg)
5760Sstevel@tonic-gate {
5770Sstevel@tonic-gate 	struct cpr_walkinfo cwinfo;
5780Sstevel@tonic-gate 
5790Sstevel@tonic-gate 	if (kpm_enable == 0)
5800Sstevel@tonic-gate 		return (0);
5810Sstevel@tonic-gate 
5820Sstevel@tonic-gate 	bzero(&cwinfo, sizeof (cwinfo));
5830Sstevel@tonic-gate 	cwinfo.mapflag = mapflag;
5840Sstevel@tonic-gate 	cwinfo.bitfunc = bitfunc;
5850Sstevel@tonic-gate 	hat_kpm_walk(cpr_walk_kpm, &cwinfo);
5860Sstevel@tonic-gate 
5870Sstevel@tonic-gate 	if (cpr_debug & LEVEL7) {
5880Sstevel@tonic-gate 		errp("walked %d sub-ranges, total pages %ld\n",
5890Sstevel@tonic-gate 		    cwinfo.ranges, mmu_btop(cwinfo.size));
5900Sstevel@tonic-gate 		cpr_show_range(segkpm->s_base, segkpm->s_size,
5910Sstevel@tonic-gate 		    mapflag, bitfunc, cwinfo.pages);
5920Sstevel@tonic-gate 	}
5930Sstevel@tonic-gate 
5940Sstevel@tonic-gate 	return (cwinfo.pages);
5950Sstevel@tonic-gate }
5960Sstevel@tonic-gate 
5970Sstevel@tonic-gate 
/*
 * Sparsely filled kernel segments are registered in kseg_table for
 * easier lookup. See also block comment for cpr_count_seg_pages.
 */

#define	KSEG_SEG_ADDR	0	/* address of struct seg */
#define	KSEG_PTR_ADDR	1	/* address of pointer to struct seg */

typedef struct {
	struct seg **st_seg;		/* segment pointer or segment address */
	pgcnt_t	(*st_fcn)(int, bitfunc_t, struct seg *); /* function to call */
	int	st_addrtype;		/* address type in st_seg */
} ksegtbl_entry_t;

/* scanned linearly by cpr_sparse_seg_check(); NULL entry terminates */
ksegtbl_entry_t kseg_table[] = {
	{(struct seg **)&kvseg,		cpr_scan_kvseg,		KSEG_SEG_ADDR},
	{&segkpm,			cpr_scan_segkpm,	KSEG_PTR_ADDR},
	{NULL,				0,			0}
};
6170Sstevel@tonic-gate 
6180Sstevel@tonic-gate 
6190Sstevel@tonic-gate /*
6200Sstevel@tonic-gate  * Compare seg with each entry in kseg_table; when there is a match
6210Sstevel@tonic-gate  * return the entry pointer, otherwise return NULL.
6220Sstevel@tonic-gate  */
6230Sstevel@tonic-gate static ksegtbl_entry_t *
6240Sstevel@tonic-gate cpr_sparse_seg_check(struct seg *seg)
6250Sstevel@tonic-gate {
6260Sstevel@tonic-gate 	ksegtbl_entry_t *ste = &kseg_table[0];
6270Sstevel@tonic-gate 	struct seg *tseg;
6280Sstevel@tonic-gate 
6290Sstevel@tonic-gate 	for (; ste->st_seg; ste++) {
6300Sstevel@tonic-gate 		tseg = (ste->st_addrtype == KSEG_PTR_ADDR) ?
6310Sstevel@tonic-gate 				*ste->st_seg : (struct seg *)ste->st_seg;
6320Sstevel@tonic-gate 		if (seg == tseg)
6330Sstevel@tonic-gate 			return (ste);
6340Sstevel@tonic-gate 	}
6350Sstevel@tonic-gate 
6360Sstevel@tonic-gate 	return ((ksegtbl_entry_t *)NULL);
6370Sstevel@tonic-gate }
6380Sstevel@tonic-gate 
6390Sstevel@tonic-gate 
6400Sstevel@tonic-gate /*
6410Sstevel@tonic-gate  * Count pages within each kernel segment; call cpr_sparse_seg_check()
6420Sstevel@tonic-gate  * to find out whether a sparsely filled segment needs special
6430Sstevel@tonic-gate  * treatment (e.g. kvseg).
6440Sstevel@tonic-gate  * Todo: A "SEGOP_CPR" like SEGOP_DUMP should be introduced, the cpr
6450Sstevel@tonic-gate  *       module shouldn't need to know segment details like if it is
6460Sstevel@tonic-gate  *       sparsely filled or not (makes kseg_table obsolete).
6470Sstevel@tonic-gate  */
6480Sstevel@tonic-gate pgcnt_t
6490Sstevel@tonic-gate cpr_count_seg_pages(int mapflag, bitfunc_t bitfunc)
6500Sstevel@tonic-gate {
6510Sstevel@tonic-gate 	struct seg *segp;
6520Sstevel@tonic-gate 	pgcnt_t pages;
6530Sstevel@tonic-gate 	ksegtbl_entry_t *ste;
6540Sstevel@tonic-gate 
6550Sstevel@tonic-gate 	pages = 0;
6560Sstevel@tonic-gate 	for (segp = AS_SEGFIRST(&kas); segp; segp = AS_SEGNEXT(&kas, segp)) {
6570Sstevel@tonic-gate 		if (ste = cpr_sparse_seg_check(segp)) {
6580Sstevel@tonic-gate 			pages += (ste->st_fcn)(mapflag, bitfunc, segp);
6590Sstevel@tonic-gate 		} else {
6600Sstevel@tonic-gate 			pages += cpr_count_pages(segp->s_base,
6610Sstevel@tonic-gate 			    segp->s_size, mapflag, bitfunc, DBG_SHOWRANGE);
6620Sstevel@tonic-gate 		}
6630Sstevel@tonic-gate 	}
6640Sstevel@tonic-gate 
6650Sstevel@tonic-gate 	return (pages);
6660Sstevel@tonic-gate }
6670Sstevel@tonic-gate 
6680Sstevel@tonic-gate 
6690Sstevel@tonic-gate /*
6700Sstevel@tonic-gate  * count kernel pages within kas and any special ranges
6710Sstevel@tonic-gate  */
6720Sstevel@tonic-gate pgcnt_t
6730Sstevel@tonic-gate cpr_count_kpages(int mapflag, bitfunc_t bitfunc)
6740Sstevel@tonic-gate {
6750Sstevel@tonic-gate 	pgcnt_t kas_cnt;
6760Sstevel@tonic-gate 
6770Sstevel@tonic-gate 	/*
6780Sstevel@tonic-gate 	 * Some pages need to be taken care of differently.
6790Sstevel@tonic-gate 	 * eg: panicbuf pages of sun4m are not in kas but they need
6800Sstevel@tonic-gate 	 * to be saved.  On sun4u, the physical pages of panicbuf are
6810Sstevel@tonic-gate 	 * allocated via prom_retain().
6820Sstevel@tonic-gate 	 */
6830Sstevel@tonic-gate 	kas_cnt = i_cpr_count_special_kpages(mapflag, bitfunc);
6840Sstevel@tonic-gate 	kas_cnt += cpr_count_seg_pages(mapflag, bitfunc);
6850Sstevel@tonic-gate 
686931Smathue 	DEBUG9(errp("cpr_count_kpages: kas_cnt=%ld\n", kas_cnt));
6870Sstevel@tonic-gate 	DEBUG7(errp("\ncpr_count_kpages: %ld pages, 0x%lx bytes\n",
6880Sstevel@tonic-gate 		kas_cnt, mmu_ptob(kas_cnt)));
6890Sstevel@tonic-gate 	return (kas_cnt);
6900Sstevel@tonic-gate }
6910Sstevel@tonic-gate 
6920Sstevel@tonic-gate 
6930Sstevel@tonic-gate /*
6940Sstevel@tonic-gate  * Set a bit corresponding to the arg phys page number;
6950Sstevel@tonic-gate  * returns 0 when the ppn is valid and the corresponding
6960Sstevel@tonic-gate  * map bit was clear, otherwise returns 1.
6970Sstevel@tonic-gate  */
6980Sstevel@tonic-gate int
6990Sstevel@tonic-gate cpr_setbit(pfn_t ppn, int mapflag)
7000Sstevel@tonic-gate {
7010Sstevel@tonic-gate 	char *bitmap;
7020Sstevel@tonic-gate 	cbd_t *dp;
7030Sstevel@tonic-gate 	pfn_t rel;
7040Sstevel@tonic-gate 	int clr;
7050Sstevel@tonic-gate 
7060Sstevel@tonic-gate 	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
7070Sstevel@tonic-gate 		if (PPN_IN_RANGE(ppn, dp)) {
7080Sstevel@tonic-gate 			bitmap = DESC_TO_MAP(dp, mapflag);
7090Sstevel@tonic-gate 			rel = ppn - dp->cbd_spfn;
7100Sstevel@tonic-gate 			if ((clr = isclr(bitmap, rel)) != 0)
7110Sstevel@tonic-gate 				setbit(bitmap, rel);
7120Sstevel@tonic-gate 			return (clr == 0);
7130Sstevel@tonic-gate 		}
7140Sstevel@tonic-gate 	}
7150Sstevel@tonic-gate 
7160Sstevel@tonic-gate 	return (1);
7170Sstevel@tonic-gate }
7180Sstevel@tonic-gate 
7190Sstevel@tonic-gate 
7200Sstevel@tonic-gate /*
7210Sstevel@tonic-gate  * Clear a bit corresponding to the arg phys page number.
7220Sstevel@tonic-gate  */
7230Sstevel@tonic-gate int
7240Sstevel@tonic-gate cpr_clrbit(pfn_t ppn, int mapflag)
7250Sstevel@tonic-gate {
7260Sstevel@tonic-gate 	char *bitmap;
7270Sstevel@tonic-gate 	cbd_t *dp;
7280Sstevel@tonic-gate 	pfn_t rel;
7290Sstevel@tonic-gate 	int set;
7300Sstevel@tonic-gate 
7310Sstevel@tonic-gate 	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
7320Sstevel@tonic-gate 		if (PPN_IN_RANGE(ppn, dp)) {
7330Sstevel@tonic-gate 			bitmap = DESC_TO_MAP(dp, mapflag);
7340Sstevel@tonic-gate 			rel = ppn - dp->cbd_spfn;
7350Sstevel@tonic-gate 			if ((set = isset(bitmap, rel)) != 0)
7360Sstevel@tonic-gate 				clrbit(bitmap, rel);
7370Sstevel@tonic-gate 			return (set == 0);
7380Sstevel@tonic-gate 		}
7390Sstevel@tonic-gate 	}
7400Sstevel@tonic-gate 
7410Sstevel@tonic-gate 	return (1);
7420Sstevel@tonic-gate }
7430Sstevel@tonic-gate 
7440Sstevel@tonic-gate 
/*
 * No-op bitfunc_t: touches no bitmap and always returns 0, so a
 * caller such as cpr_count_pages() counts every valid page without
 * tagging or untagging anything.
 */
/* ARGSUSED */
int
cpr_nobit(pfn_t ppn, int mapflag)
{
	return (0);
}
7510Sstevel@tonic-gate 
7520Sstevel@tonic-gate 
7530Sstevel@tonic-gate /*
7540Sstevel@tonic-gate  * Lookup a bit corresponding to the arg phys page number.
7550Sstevel@tonic-gate  */
7560Sstevel@tonic-gate int
7570Sstevel@tonic-gate cpr_isset(pfn_t ppn, int mapflag)
7580Sstevel@tonic-gate {
7590Sstevel@tonic-gate 	char *bitmap;
7600Sstevel@tonic-gate 	cbd_t *dp;
7610Sstevel@tonic-gate 	pfn_t rel;
7620Sstevel@tonic-gate 
7630Sstevel@tonic-gate 	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
7640Sstevel@tonic-gate 		if (PPN_IN_RANGE(ppn, dp)) {
7650Sstevel@tonic-gate 			bitmap = DESC_TO_MAP(dp, mapflag);
7660Sstevel@tonic-gate 			rel = ppn - dp->cbd_spfn;
7670Sstevel@tonic-gate 			return (isset(bitmap, rel));
7680Sstevel@tonic-gate 		}
7690Sstevel@tonic-gate 	}
7700Sstevel@tonic-gate 
7710Sstevel@tonic-gate 	return (0);
7720Sstevel@tonic-gate }
7730Sstevel@tonic-gate 
7740Sstevel@tonic-gate 
7750Sstevel@tonic-gate /*
7760Sstevel@tonic-gate  * Go thru all pages and pick up any page not caught during the invalidation
7770Sstevel@tonic-gate  * stage. This is also used to save pages with cow lock or phys page lock held
7780Sstevel@tonic-gate  * (none zero p_lckcnt or p_cowcnt)
7790Sstevel@tonic-gate  */
7800Sstevel@tonic-gate static	int
7810Sstevel@tonic-gate cpr_count_upages(int mapflag, bitfunc_t bitfunc)
7820Sstevel@tonic-gate {
7830Sstevel@tonic-gate 	page_t *pp, *page0;
7840Sstevel@tonic-gate 	pgcnt_t dcnt = 0, tcnt = 0;
7850Sstevel@tonic-gate 	pfn_t pfn;
7860Sstevel@tonic-gate 
7870Sstevel@tonic-gate 	page0 = pp = page_first();
7880Sstevel@tonic-gate 
7890Sstevel@tonic-gate 	do {
7900Sstevel@tonic-gate #if defined(__sparc)
7910Sstevel@tonic-gate 		extern struct vnode prom_ppages;
792*3290Sjohansen 		if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
7930Sstevel@tonic-gate 		    pp->p_vnode == &prom_ppages ||
7940Sstevel@tonic-gate 			PP_ISFREE(pp) && PP_ISAGED(pp))
7950Sstevel@tonic-gate #else
796*3290Sjohansen 		if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
7970Sstevel@tonic-gate 		    PP_ISFREE(pp) && PP_ISAGED(pp))
7980Sstevel@tonic-gate #endif /* __sparc */
7990Sstevel@tonic-gate 			continue;
8000Sstevel@tonic-gate 
8010Sstevel@tonic-gate 		pfn = page_pptonum(pp);
8020Sstevel@tonic-gate 		if (pf_is_memory(pfn)) {
8030Sstevel@tonic-gate 			tcnt++;
8040Sstevel@tonic-gate 			if ((*bitfunc)(pfn, mapflag) == 0)
8050Sstevel@tonic-gate 				dcnt++; /* dirty count */
8060Sstevel@tonic-gate 		}
8070Sstevel@tonic-gate 	} while ((pp = page_next(pp)) != page0);
8080Sstevel@tonic-gate 
8090Sstevel@tonic-gate 	STAT->cs_upage2statef = dcnt;
8100Sstevel@tonic-gate 	DEBUG9(errp("cpr_count_upages: dirty=%ld total=%ld\n",
8110Sstevel@tonic-gate 		dcnt, tcnt));
8120Sstevel@tonic-gate 	DEBUG7(errp("cpr_count_upages: %ld pages, 0x%lx bytes\n",
8130Sstevel@tonic-gate 		dcnt, mmu_ptob(dcnt)));
8140Sstevel@tonic-gate 	return (dcnt);
8150Sstevel@tonic-gate }
8160Sstevel@tonic-gate 
8170Sstevel@tonic-gate 
8180Sstevel@tonic-gate /*
8190Sstevel@tonic-gate  * try compressing pages based on cflag,
8200Sstevel@tonic-gate  * and for DEBUG kernels, verify uncompressed data checksum;
8210Sstevel@tonic-gate  *
8220Sstevel@tonic-gate  * this routine replaces common code from
8230Sstevel@tonic-gate  * i_cpr_compress_and_save() and cpr_compress_and_write()
8240Sstevel@tonic-gate  */
8250Sstevel@tonic-gate char *
8260Sstevel@tonic-gate cpr_compress_pages(cpd_t *dp, pgcnt_t pages, int cflag)
8270Sstevel@tonic-gate {
8280Sstevel@tonic-gate 	size_t nbytes, clen, len;
8290Sstevel@tonic-gate 	uint32_t test_sum;
8300Sstevel@tonic-gate 	char *datap;
8310Sstevel@tonic-gate 
8320Sstevel@tonic-gate 	nbytes = mmu_ptob(pages);
8330Sstevel@tonic-gate 
8340Sstevel@tonic-gate 	/*
8350Sstevel@tonic-gate 	 * set length to the original uncompressed data size;
8360Sstevel@tonic-gate 	 * always init cpd_flag to zero
8370Sstevel@tonic-gate 	 */
8380Sstevel@tonic-gate 	dp->cpd_length = nbytes;
8390Sstevel@tonic-gate 	dp->cpd_flag = 0;
8400Sstevel@tonic-gate 
8410Sstevel@tonic-gate #ifdef	DEBUG
8420Sstevel@tonic-gate 	/*
8430Sstevel@tonic-gate 	 * Make a copy of the uncompressed data so we can checksum it.
8440Sstevel@tonic-gate 	 * Compress that copy so the checksum works at the other end
8450Sstevel@tonic-gate 	 */
8460Sstevel@tonic-gate 	cprbcopy(CPR->c_mapping_area, cpr_pagecopy, nbytes);
8470Sstevel@tonic-gate 	dp->cpd_usum = checksum32(cpr_pagecopy, nbytes);
8480Sstevel@tonic-gate 	dp->cpd_flag |= CPD_USUM;
8490Sstevel@tonic-gate 	datap = cpr_pagecopy;
8500Sstevel@tonic-gate #else
8510Sstevel@tonic-gate 	datap = CPR->c_mapping_area;
8520Sstevel@tonic-gate 	dp->cpd_usum = 0;
8530Sstevel@tonic-gate #endif
8540Sstevel@tonic-gate 
8550Sstevel@tonic-gate 	/*
8560Sstevel@tonic-gate 	 * try compressing the raw data to cpr_pagedata;
8570Sstevel@tonic-gate 	 * if there was a size reduction: record the new length,
8580Sstevel@tonic-gate 	 * flag the compression, and point to the compressed data.
8590Sstevel@tonic-gate 	 */
8600Sstevel@tonic-gate 	dp->cpd_csum = 0;
8610Sstevel@tonic-gate 	if (cflag) {
8620Sstevel@tonic-gate 		clen = compress(datap, cpr_pagedata, nbytes);
8630Sstevel@tonic-gate 		if (clen < nbytes) {
8640Sstevel@tonic-gate 			dp->cpd_flag |= CPD_COMPRESS;
8650Sstevel@tonic-gate 			dp->cpd_length = clen;
8660Sstevel@tonic-gate 			datap = cpr_pagedata;
8670Sstevel@tonic-gate #ifdef	DEBUG
8680Sstevel@tonic-gate 			dp->cpd_csum = checksum32(datap, clen);
8690Sstevel@tonic-gate 			dp->cpd_flag |= CPD_CSUM;
8700Sstevel@tonic-gate 
8710Sstevel@tonic-gate 			/*
8720Sstevel@tonic-gate 			 * decompress the data back to a scratch area
8730Sstevel@tonic-gate 			 * and compare the new checksum with the original
8740Sstevel@tonic-gate 			 * checksum to verify the compression.
8750Sstevel@tonic-gate 			 */
8760Sstevel@tonic-gate 			bzero(cpr_pagecopy, sizeof (cpr_pagecopy));
8770Sstevel@tonic-gate 			len = decompress(datap, cpr_pagecopy,
8780Sstevel@tonic-gate 			    clen, sizeof (cpr_pagecopy));
8790Sstevel@tonic-gate 			test_sum = checksum32(cpr_pagecopy, len);
8800Sstevel@tonic-gate 			ASSERT(test_sum == dp->cpd_usum);
8810Sstevel@tonic-gate #endif
8820Sstevel@tonic-gate 		}
8830Sstevel@tonic-gate 	}
8840Sstevel@tonic-gate 
8850Sstevel@tonic-gate 	return (datap);
8860Sstevel@tonic-gate }
8870Sstevel@tonic-gate 
8880Sstevel@tonic-gate 
8890Sstevel@tonic-gate /*
8900Sstevel@tonic-gate  * 1. Prepare cpr page descriptor and write it to file
8910Sstevel@tonic-gate  * 2. Compress page data and write it out
8920Sstevel@tonic-gate  */
8930Sstevel@tonic-gate static int
8940Sstevel@tonic-gate cpr_compress_and_write(vnode_t *vp, uint_t va, pfn_t pfn, pgcnt_t npg)
8950Sstevel@tonic-gate {
8960Sstevel@tonic-gate 	int error = 0;
8970Sstevel@tonic-gate 	char *datap;
8980Sstevel@tonic-gate 	cpd_t cpd;	/* cpr page descriptor */
8990Sstevel@tonic-gate 	extern void i_cpr_mapin(caddr_t, uint_t, pfn_t);
9000Sstevel@tonic-gate 	extern void i_cpr_mapout(caddr_t, uint_t);
9010Sstevel@tonic-gate 
9020Sstevel@tonic-gate 	i_cpr_mapin(CPR->c_mapping_area, npg, pfn);
9030Sstevel@tonic-gate 
904931Smathue 	DEBUG3(errp("mapped-in %ld pages, vaddr 0x%p, pfn 0x%lx\n",
9050Sstevel@tonic-gate 		npg, CPR->c_mapping_area, pfn));
9060Sstevel@tonic-gate 
9070Sstevel@tonic-gate 	/*
9080Sstevel@tonic-gate 	 * Fill cpr page descriptor.
9090Sstevel@tonic-gate 	 */
9100Sstevel@tonic-gate 	cpd.cpd_magic = (uint_t)CPR_PAGE_MAGIC;
9110Sstevel@tonic-gate 	cpd.cpd_pfn = pfn;
9120Sstevel@tonic-gate 	cpd.cpd_pages = npg;
9130Sstevel@tonic-gate 
9140Sstevel@tonic-gate 	STAT->cs_dumped_statefsz += mmu_ptob(npg);
9150Sstevel@tonic-gate 
9160Sstevel@tonic-gate 	datap = cpr_compress_pages(&cpd, npg, CPR->c_flags & C_COMPRESSING);
9170Sstevel@tonic-gate 
9180Sstevel@tonic-gate 	/* Write cpr page descriptor */
9190Sstevel@tonic-gate 	error = cpr_write(vp, (caddr_t)&cpd, sizeof (cpd_t));
9200Sstevel@tonic-gate 
9210Sstevel@tonic-gate 	/* Write compressed page data */
9220Sstevel@tonic-gate 	error = cpr_write(vp, (caddr_t)datap, cpd.cpd_length);
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate 	/*
9250Sstevel@tonic-gate 	 * Unmap the pages for tlb and vac flushing
9260Sstevel@tonic-gate 	 */
9270Sstevel@tonic-gate 	i_cpr_mapout(CPR->c_mapping_area, npg);
9280Sstevel@tonic-gate 
9290Sstevel@tonic-gate 	if (error) {
9300Sstevel@tonic-gate 		DEBUG1(errp("cpr_compress_and_write: vp 0x%p va 0x%x ",
9310Sstevel@tonic-gate 		    vp, va));
9320Sstevel@tonic-gate 		DEBUG1(errp("pfn 0x%lx blk %d err %d\n",
9330Sstevel@tonic-gate 		    pfn, cpr_file_bn, error));
9340Sstevel@tonic-gate 	} else {
9350Sstevel@tonic-gate 		cpr_regular_pgs_dumped += npg;
9360Sstevel@tonic-gate 	}
9370Sstevel@tonic-gate 
9380Sstevel@tonic-gate 	return (error);
9390Sstevel@tonic-gate }
9400Sstevel@tonic-gate 
9410Sstevel@tonic-gate 
/*
 * Buffered write to the statefile: copy size bytes from buffer into
 * the staging buffer (cpr_buf); whenever the staging buffer fills,
 * flush it to disk with VOP_DUMP in cpr_buf_blocks-sized chunks.
 * Returns 0 on success (data may remain buffered until the next fill
 * or cpr_flush_write()), ENOSPC when the device/file would overflow,
 * EINVAL if disk writes are not yet permitted, or a VOP_DUMP error.
 */
int
cpr_write(vnode_t *vp, caddr_t buffer, size_t size)
{
	caddr_t	fromp = buffer;
	size_t bytes, wbytes;
	int error;

	/* lazily determine the output capacity on first use */
	if (cpr_dev_space == 0) {
		if (vp->v_type == VBLK) {
			cpr_dev_space = cpr_get_devsize(vp->v_rdev);
			ASSERT(cpr_dev_space);
		} else
			cpr_dev_space = 1;	/* not used in this case */
	}

	/*
	 * break the write into multiple part if request is large,
	 * calculate count up to buf page boundary, then write it out.
	 * repeat until done.
	 */
	while (size) {
		/* copy as much as fits in the staging buffer */
		bytes = MIN(size, cpr_buf_end - cpr_wptr);
		cprbcopy(fromp, cpr_wptr, bytes);
		cpr_wptr += bytes;
		fromp += bytes;
		size -= bytes;
		if (cpr_wptr < cpr_buf_end)
			return (0);	/* buffer not full yet */
		ASSERT(cpr_wptr == cpr_buf_end);

		/* byte offset after this flush; must fit the target */
		wbytes = dbtob(cpr_file_bn + cpr_buf_blocks);
		if (vp->v_type == VBLK) {
			if (wbytes > cpr_dev_space)
				return (ENOSPC);
		} else {
			if (wbytes > VTOI(vp)->i_size)
				return (ENOSPC);
		}

		DEBUG3(errp("cpr_write: frmp=%p wptr=%p cnt=%lx...",
			fromp, cpr_wptr, bytes));
		/*
		 * cross check, this should not happen!
		 */
		if (cpr_disk_writes_ok == 0) {
			errp("cpr_write: disk write too early!\n");
			return (EINVAL);
		}

		/* use polled I/O while dumping the state file */
		do_polled_io = 1;
		error = VOP_DUMP(vp, cpr_buf, cpr_file_bn, cpr_buf_blocks);
		do_polled_io = 0;
		DEBUG3(errp("done\n"));

		STAT->cs_real_statefsz += cpr_buf_size;

		if (error) {
			cpr_err(CE_WARN, "cpr_write error %d", error);
			return (error);
		}
		cpr_file_bn += cpr_buf_blocks;	/* Increment block count */
		cpr_wptr = cpr_buf;		/* back to top of buffer */
	}
	return (0);
}
10070Sstevel@tonic-gate 
10080Sstevel@tonic-gate 
10090Sstevel@tonic-gate int
10100Sstevel@tonic-gate cpr_flush_write(vnode_t *vp)
10110Sstevel@tonic-gate {
10120Sstevel@tonic-gate 	int	nblk;
10130Sstevel@tonic-gate 	int	error;
10140Sstevel@tonic-gate 
10150Sstevel@tonic-gate 	/*
10160Sstevel@tonic-gate 	 * Calculate remaining blocks in buffer, rounded up to nearest
10170Sstevel@tonic-gate 	 * disk block
10180Sstevel@tonic-gate 	 */
10190Sstevel@tonic-gate 	nblk = btod(cpr_wptr - cpr_buf);
10200Sstevel@tonic-gate 
10210Sstevel@tonic-gate 	do_polled_io = 1;
10220Sstevel@tonic-gate 	error = VOP_DUMP(vp, (caddr_t)cpr_buf, cpr_file_bn, nblk);
10230Sstevel@tonic-gate 	do_polled_io = 0;
10240Sstevel@tonic-gate 
10250Sstevel@tonic-gate 	cpr_file_bn += nblk;
10260Sstevel@tonic-gate 	if (error)
10270Sstevel@tonic-gate 		DEBUG2(errp("cpr_flush_write: error (%d)\n", error));
10280Sstevel@tonic-gate 	return (error);
10290Sstevel@tonic-gate }
10300Sstevel@tonic-gate 
10310Sstevel@tonic-gate void
10320Sstevel@tonic-gate cpr_clear_bitmaps(void)
10330Sstevel@tonic-gate {
10340Sstevel@tonic-gate 	cbd_t *dp;
10350Sstevel@tonic-gate 
10360Sstevel@tonic-gate 	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
10370Sstevel@tonic-gate 		bzero((void *)dp->cbd_reg_bitmap,
10380Sstevel@tonic-gate 		    (size_t)dp->cbd_size * 2);
10390Sstevel@tonic-gate 	}
10400Sstevel@tonic-gate 	DEBUG7(errp("\ncleared reg and vlt bitmaps\n"));
10410Sstevel@tonic-gate }
10420Sstevel@tonic-gate 
/*
 * Scan each regular bitmap for runs of consecutive tagged pages, up
 * to CPR_MAXCONTIG pages per run.  Depending on flag, each run is
 * either compressed and saved to platform storage (SAVE_TO_STORAGE),
 * compressed and written to the statefile (WRITE_TO_STATEFILE), or
 * merely counted (STORAGE_DESC_ALLOC, which returns the run count).
 * Returns the run count for STORAGE_DESC_ALLOC, a save/write error,
 * or 0.
 */
int
cpr_contig_pages(vnode_t *vp, int flag)
{
	int chunks = 0, error = 0;
	pgcnt_t i, j, totbit;
	pfn_t spfn;
	cbd_t *dp;
	uint_t	spin_cnt = 0;
	extern	int i_cpr_compress_and_save();

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		spfn = dp->cbd_spfn;
		totbit = BTOb(dp->cbd_size);
		i = 0; /* Beginning of bitmap */
		j = 0;
		while (i < totbit) {
			/* extend run [i, i+j) while bits stay set */
			while ((j < CPR_MAXCONTIG) && ((j + i) < totbit)) {
				if (isset((char *)dp->cbd_reg_bitmap, j+i))
					j++;
				else /* not contiguous anymore */
					break;
			}

			if (j) {
				chunks++;
				if (flag == SAVE_TO_STORAGE) {
					error = i_cpr_compress_and_save(
					    chunks, spfn + i, j);
					if (error)
						return (error);
				} else if (flag == WRITE_TO_STATEFILE) {
					error = cpr_compress_and_write(vp, 0,
					    spfn + i, j);
					if (error)
						return (error);
					else {
						spin_cnt++;
						/* periodic user feedback */
						if ((spin_cnt & 0x5F) == 1)
							cpr_spinning_bar();
					}
				}
			}

			i += j;
			if (j != CPR_MAXCONTIG) {
				/* Stopped on a non-tagged page */
				i++;
			}

			j = 0;
		}
	}

	if (flag == STORAGE_DESC_ALLOC)
		return (chunks);
	else
		return (0);
}
11010Sstevel@tonic-gate 
11020Sstevel@tonic-gate 
11030Sstevel@tonic-gate void
11040Sstevel@tonic-gate cpr_show_range(caddr_t vaddr, size_t size,
11050Sstevel@tonic-gate     int mapflag, bitfunc_t bitfunc, pgcnt_t count)
11060Sstevel@tonic-gate {
11070Sstevel@tonic-gate 	char *action, *bname;
11080Sstevel@tonic-gate 
11090Sstevel@tonic-gate 	bname = (mapflag == REGULAR_BITMAP) ? "regular" : "volatile";
11100Sstevel@tonic-gate 	if (bitfunc == cpr_setbit)
11110Sstevel@tonic-gate 		action = "tag";
11120Sstevel@tonic-gate 	else if (bitfunc == cpr_clrbit)
11130Sstevel@tonic-gate 		action = "untag";
11140Sstevel@tonic-gate 	else
11150Sstevel@tonic-gate 		action = "none";
11160Sstevel@tonic-gate 	errp("range (0x%p, 0x%p), %s bitmap, %s %ld\n",
11170Sstevel@tonic-gate 	    vaddr, vaddr + size, bname, action, count);
11180Sstevel@tonic-gate }
11190Sstevel@tonic-gate 
11200Sstevel@tonic-gate 
11210Sstevel@tonic-gate pgcnt_t
11220Sstevel@tonic-gate cpr_count_pages(caddr_t sva, size_t size,
11230Sstevel@tonic-gate     int mapflag, bitfunc_t bitfunc, int showrange)
11240Sstevel@tonic-gate {
11250Sstevel@tonic-gate 	caddr_t	va, eva;
11260Sstevel@tonic-gate 	pfn_t pfn;
11270Sstevel@tonic-gate 	pgcnt_t count = 0;
11280Sstevel@tonic-gate 
11290Sstevel@tonic-gate 	eva = sva + PAGE_ROUNDUP(size);
11300Sstevel@tonic-gate 	for (va = sva; va < eva; va += MMU_PAGESIZE) {
11310Sstevel@tonic-gate 		pfn = va_to_pfn(va);
11320Sstevel@tonic-gate 		if (pfn != PFN_INVALID && pf_is_memory(pfn)) {
11330Sstevel@tonic-gate 			if ((*bitfunc)(pfn, mapflag) == 0)
11340Sstevel@tonic-gate 				count++;
11350Sstevel@tonic-gate 		}
11360Sstevel@tonic-gate 	}
11370Sstevel@tonic-gate 
11380Sstevel@tonic-gate 	if ((cpr_debug & LEVEL7) && showrange == DBG_SHOWRANGE)
11390Sstevel@tonic-gate 		cpr_show_range(sva, size, mapflag, bitfunc, count);
11400Sstevel@tonic-gate 
11410Sstevel@tonic-gate 	return (count);
11420Sstevel@tonic-gate }
11430Sstevel@tonic-gate 
11440Sstevel@tonic-gate 
11450Sstevel@tonic-gate pgcnt_t
11460Sstevel@tonic-gate cpr_count_volatile_pages(int mapflag, bitfunc_t bitfunc)
11470Sstevel@tonic-gate {
11480Sstevel@tonic-gate 	pgcnt_t count = 0;
11490Sstevel@tonic-gate 
11500Sstevel@tonic-gate 	if (cpr_buf) {
11510Sstevel@tonic-gate 		count += cpr_count_pages(cpr_buf, cpr_buf_size,
11520Sstevel@tonic-gate 		    mapflag, bitfunc, DBG_SHOWRANGE);
11530Sstevel@tonic-gate 	}
11540Sstevel@tonic-gate 	if (cpr_pagedata) {
11550Sstevel@tonic-gate 		count += cpr_count_pages(cpr_pagedata, cpr_pagedata_size,
11560Sstevel@tonic-gate 		    mapflag, bitfunc, DBG_SHOWRANGE);
11570Sstevel@tonic-gate 	}
11580Sstevel@tonic-gate 	count += i_cpr_count_storage_pages(mapflag, bitfunc);
11590Sstevel@tonic-gate 
11600Sstevel@tonic-gate 	DEBUG7(errp("cpr_count_vpages: %ld pages, 0x%lx bytes\n",
11610Sstevel@tonic-gate 	    count, mmu_ptob(count)));
11620Sstevel@tonic-gate 	return (count);
11630Sstevel@tonic-gate }
11640Sstevel@tonic-gate 
11650Sstevel@tonic-gate 
11660Sstevel@tonic-gate static int
11670Sstevel@tonic-gate cpr_dump_regular_pages(vnode_t *vp)
11680Sstevel@tonic-gate {
11690Sstevel@tonic-gate 	int error;
11700Sstevel@tonic-gate 
11710Sstevel@tonic-gate 	cpr_regular_pgs_dumped = 0;
11720Sstevel@tonic-gate 	error = cpr_contig_pages(vp, WRITE_TO_STATEFILE);
11730Sstevel@tonic-gate 	if (!error)
11740Sstevel@tonic-gate 		DEBUG7(errp("cpr_dump_regular_pages() done.\n"));
11750Sstevel@tonic-gate 	return (error);
11760Sstevel@tonic-gate }
1177