/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Fill in and write out the cpr state file
 *	1. Allocate and write headers, ELF and cpr dump header
 *	2. Allocate bitmaps according to phys_install
 *	3. Tag kernel pages into corresponding bitmap
 *	4. Write bitmaps to state file
 *	5. Write actual physical page data to state file
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/memlist.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/fs/ufs_inode.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <sys/cpr.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/panic.h>
#include <sys/thread.h>
#include <sys/note.h>

/* Local defines and variables */
#define	BTOb(bytes)	((bytes) << 3)		/* Bytes to bits, log2(NBBY) */
#define	bTOB(bits)	((bits) >> 3)		/* bits to Bytes, log2(NBBY) */

#if defined(__sparc)
static uint_t cpr_pages_tobe_dumped;
static uint_t cpr_regular_pgs_dumped;
static int cpr_dump_regular_pages(vnode_t *);
static int cpr_count_upages(int, bitfunc_t);
static int cpr_compress_and_write(vnode_t *, uint_t, pfn_t, pgcnt_t);
#endif

int cpr_flush_write(vnode_t *);

int cpr_contig_pages(vnode_t *, int);

void cpr_clear_bitmaps();

extern size_t cpr_get_devsize(dev_t);
extern int i_cpr_dump_setup(vnode_t *);
extern int i_cpr_blockzero(char *, char **, int *, vnode_t *);
extern int cpr_test_mode;
int cpr_setbit(pfn_t, int);
int cpr_clrbit(pfn_t, int);

ctrm_t cpr_term;

char *cpr_buf, *cpr_buf_end;
int cpr_buf_blocks;		/* size of cpr_buf in blocks */
size_t cpr_buf_size;		/* size of cpr_buf in bytes */
size_t cpr_bitmap_size;
int cpr_nbitmaps;

char *cpr_pagedata;		/* page buffer for compression / tmp copy */
size_t cpr_pagedata_size;	/* page buffer size in bytes */

#if defined(__sparc)
static char *cpr_wptr;		/* keep track of where to write to next */
static int cpr_file_bn;		/* cpr state-file block offset */
static int cpr_disk_writes_ok;
static size_t cpr_dev_space = 0;
#endif

char cpr_pagecopy[CPR_MAXCONTIG * MMU_PAGESIZE];

#if defined(__sparc)
/*
 * On some platforms bcopy may modify the thread structure
 * during bcopy (eg, to prevent cpu migration). If the
 * range we are currently writing out includes our own
 * thread structure then it will be snapshotted by bcopy
 * including those modified members - and the updates made
 * on exit from bcopy will no longer be seen when we later
 * restore the mid-bcopy kthread_t. So if the range we
 * need to copy overlaps with our thread structure we will
 * use a simple byte copy.
 */
void
cprbcopy(void *from, void *to, size_t bytes)
{
	extern int curthreadremapped;
	caddr_t kthrend;

	kthrend = (caddr_t)curthread + sizeof (kthread_t) - 1;
	if (curthreadremapped || (kthrend >= (caddr_t)from &&
	    kthrend < (caddr_t)from + bytes + sizeof (kthread_t) - 1)) {
		caddr_t src = from, dst = to;

		while (bytes-- > 0)
			*dst++ = *src++;
	} else {
		bcopy(from, to, bytes);
	}
}

/*
 * Allocate pages for buffers used in writing out the statefile
 */
static int
cpr_alloc_bufs(void)
{
	char *allocerr = "Unable to allocate memory for cpr buffer";
	size_t size;

	/*
	 * set the cpr write buffer size to at least the historic
	 * size (128k) or large enough to store both the early
	 * set of statefile structures (well under 0x800) plus the
	 * bitmaps, and round up to the next pagesize.
	 */
	size = PAGE_ROUNDUP(dbtob(4) + cpr_bitmap_size);
	cpr_buf_size = MAX(size, CPRBUFSZ);
	cpr_buf_blocks = btodb(cpr_buf_size);
	cpr_buf = kmem_alloc(cpr_buf_size, KM_NOSLEEP);
	if (cpr_buf == NULL) {
		cpr_err(CE_WARN, allocerr);
		return (ENOMEM);
	}
	cpr_buf_end = cpr_buf + cpr_buf_size;

	cpr_pagedata_size = mmu_ptob(CPR_MAXCONTIG + 1);
	cpr_pagedata = kmem_alloc(cpr_pagedata_size, KM_NOSLEEP);
	if (cpr_pagedata == NULL) {
		kmem_free(cpr_buf, cpr_buf_size);
		cpr_buf = NULL;
		cpr_err(CE_WARN, allocerr);
		return (ENOMEM);
	}

	return (0);
}


/*
 * Set bitmap size in bytes based on phys_install.
 */
void
cpr_set_bitmap_size(void)
{
	struct memlist *pmem;
	size_t size = 0;

	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->ml_next)
		size += pmem->ml_size;
	memlist_read_unlock();
	cpr_bitmap_size = BITMAP_BYTES(size);
}


/*
 * CPR dump header contains the following information:
 *	1. header magic -- unique to cpr state file
 *	2. kernel return pc & ppn for resume
 *	3. current thread info
 *	4. debug level and test mode
 *	5. number of bitmaps allocated
 *	6. number of page records
 */
static int
cpr_write_header(vnode_t *vp)
{
	extern ushort_t cpr_mach_type;
	struct cpr_dump_desc cdump;
	pgcnt_t bitmap_pages;
	pgcnt_t kpages, vpages, upages;
	pgcnt_t cpr_count_kpages(int mapflag, bitfunc_t bitfunc);

	cdump.cdd_magic = (uint_t)CPR_DUMP_MAGIC;
	cdump.cdd_version = CPR_VERSION;
	cdump.cdd_machine = cpr_mach_type;
	cdump.cdd_debug = cpr_debug;
	cdump.cdd_test_mode = cpr_test_mode;
	cdump.cdd_bitmaprec = cpr_nbitmaps;

	cpr_clear_bitmaps();

	/*
	 * Remember how many pages we plan to save to statefile.
	 * This information will be used for sanity checks.
	 * Untag those pages that will not be saved to statefile.
	 */
	kpages = cpr_count_kpages(REGULAR_BITMAP, cpr_setbit);
	vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);
	upages = cpr_count_upages(REGULAR_BITMAP, cpr_setbit);
	cdump.cdd_dumppgsize = kpages - vpages + upages;
	cpr_pages_tobe_dumped = cdump.cdd_dumppgsize;
	CPR_DEBUG(CPR_DEBUG7,
	    "\ncpr_write_header: kpages %ld - vpages %ld + upages %ld = %d\n",
	    kpages, vpages, upages, cdump.cdd_dumppgsize);

	/*
	 * Some pages contain volatile data (cpr_buf and storage area for
	 * sensitive kpages), which are no longer needed after the statefile
	 * is dumped to disk. We have already untagged them from regular
	 * bitmaps. Now tag them into the volatile bitmaps. The pages in
	 * volatile bitmaps will be claimed during resume, and the resumed
	 * kernel will free them.
	 */
	(void) cpr_count_volatile_pages(VOLATILE_BITMAP, cpr_setbit);

	bitmap_pages = mmu_btopr(cpr_bitmap_size);

	/*
	 * Export accurate statefile size for statefile allocation retry.
	 * statefile_size = all the headers + total pages +
	 * number of pages used by the bitmaps.
	 * Roundup will be done in the file allocation code.
	 */
	STAT->cs_nocomp_statefsz = sizeof (cdd_t) + sizeof (cmd_t) +
	    (sizeof (cbd_t) * cdump.cdd_bitmaprec) +
	    (sizeof (cpd_t) * cdump.cdd_dumppgsize) +
	    mmu_ptob(cdump.cdd_dumppgsize + bitmap_pages);

	/*
	 * If the estimated statefile is not big enough,
	 * retry now to avoid unnecessary operations.
	 */
	if (!(CPR->c_flags & C_COMPRESSING) &&
	    (STAT->cs_nocomp_statefsz > STAT->cs_est_statefsz)) {
		if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG7))
			prom_printf("cpr_write_header: "
			    "STAT->cs_nocomp_statefsz > "
			    "STAT->cs_est_statefsz\n");
		return (ENOSPC);
	}

	/* now write cpr dump descriptor */
	return (cpr_write(vp, (caddr_t)&cdump, sizeof (cdd_t)));
}


/*
 * CPR dump tail record contains the following information:
 *	1. header magic -- unique to cpr state file
 *	2. all misc info that needs to be passed to cprboot or resumed kernel
 */
static int
cpr_write_terminator(vnode_t *vp)
{
	cpr_term.magic = (uint_t)CPR_TERM_MAGIC;
	cpr_term.va = (cpr_ptr)&cpr_term;
	cpr_term.pfn = (cpr_ext)va_to_pfn(&cpr_term);

	/* count the last one (flush) */
	cpr_term.real_statef_size = STAT->cs_real_statefsz +
	    btod(cpr_wptr - cpr_buf) * DEV_BSIZE;

	CPR_DEBUG(CPR_DEBUG9, "cpr_dump: Real Statefile Size: %ld\n",
	    STAT->cs_real_statefsz);

	cpr_tod_get(&cpr_term.tm_shutdown);

	return (cpr_write(vp, (caddr_t)&cpr_term, sizeof (cpr_term)));
}

/*
 * Write bitmap descriptor array, followed by merged bitmaps.
 */
static int
cpr_write_bitmap(vnode_t *vp)
{
	char *rmap, *vmap, *dst, *tail;
	size_t size, bytes;
	cbd_t *dp;
	int err;

	dp = CPR->c_bmda;
	if (err = cpr_write(vp, (caddr_t)dp, cpr_nbitmaps * sizeof (*dp)))
		return (err);

	/*
	 * merge regular and volatile bitmaps into tmp space
	 * and write to disk
	 */
	for (; dp->cbd_size; dp++) {
		rmap = (char *)dp->cbd_reg_bitmap;
		vmap = (char *)dp->cbd_vlt_bitmap;
		for (size = dp->cbd_size; size; size -= bytes) {
			bytes = min(size, sizeof (cpr_pagecopy));
			tail = &cpr_pagecopy[bytes];
			for (dst = cpr_pagecopy; dst < tail; dst++)
				*dst = *rmap++ | *vmap++;
			if (err = cpr_write(vp, cpr_pagecopy, bytes))
				break;
		}
	}

	return (err);
}

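/*
 * Write the body of the statefile: untag the sensitive kpages that were
 * saved aside earlier, enable disk writes, dump the saved sensitive
 * kpages, then the remaining regular pages, and finally sanity-check
 * that the number of pages dumped matches the count recorded in the
 * dump header.
 */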
static int
cpr_write_statefile(vnode_t *vp)
{
	uint_t error = 0;
	extern int i_cpr_check_pgs_dumped();
	void flush_windows(void);
	pgcnt_t spages;
	char *str;

	flush_windows();

	/*
	 * to get an accurate view of kas, we need to untag sensitive
	 * pages *before* dumping them because the disk driver makes
	 * allocations and changes kas along the way. The remaining
	 * pages referenced in the bitmaps are dumped out later as
	 * regular kpages.
	 */
	str = "cpr_write_statefile:";
	spages = i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_clrbit);
	CPR_DEBUG(CPR_DEBUG7, "%s untag %ld sens pages\n", str, spages);

	/*
	 * now it's OK to call a driver that makes allocations
	 */
	cpr_disk_writes_ok = 1;

	/*
	 * now write out the clean sensitive kpages
	 * according to the sensitive descriptors
	 */
	error = i_cpr_dump_sensitive_kpages(vp);
	if (error) {
		CPR_DEBUG(CPR_DEBUG7,
		    "%s cpr_dump_sensitive_kpages() failed!\n", str);
		return (error);
	}

	/*
	 * cpr_dump_regular_pages() counts cpr_regular_pgs_dumped
	 */
	error = cpr_dump_regular_pages(vp);
	if (error) {
		CPR_DEBUG(CPR_DEBUG7,
		    "%s cpr_dump_regular_pages() failed!\n", str);
		return (error);
	}

	/*
	 * sanity check to verify the right number of pages were dumped
	 */
	error = i_cpr_check_pgs_dumped(cpr_pages_tobe_dumped,
	    cpr_regular_pgs_dumped);

	if (error) {
		prom_printf("\n%s page count mismatch!\n", str);
#ifdef DEBUG
		if (cpr_test_mode)
			debug_enter(NULL);
#endif
	}

	return (error);
}
#endif

/*
 * Create the CPR state file; the following sections are
 * written out in sequence:
 *	- the cpr dump header
 *	- the memory usage bitmaps
 *	- the platform dependent info
 *	- the remaining user pages
 *	- the kernel pages
 */
#if defined(__x86)
_NOTE(ARGSUSED(0))
#endif
int
cpr_dump(vnode_t *vp)
{
#if defined(__sparc)
	int error;

	if (cpr_buf == NULL) {
		ASSERT(cpr_pagedata == NULL);
		if (error = cpr_alloc_bufs())
			return (error);
	}
	/* point to top of internal buffer */
	cpr_wptr = cpr_buf;

	/* initialize global variables used by the write operation */
	cpr_file_bn = cpr_statefile_offset();
	cpr_dev_space = 0;

	/* allocate bitmaps */
	if (CPR->c_bmda == NULL) {
		if (error = i_cpr_alloc_bitmaps()) {
			cpr_err(CE_WARN, "cannot allocate bitmaps");
			return (error);
		}
	}

	if (error = i_cpr_prom_pages(CPR_PROM_SAVE))
		return (error);

	if (error = i_cpr_dump_setup(vp))
		return (error);

	/*
	 * set internal cross checking; we don't want to call
	 * a disk driver that makes allocations until after
	 * sensitive pages are saved
	 */
	cpr_disk_writes_ok = 0;

	/*
	 * 1253112: heap corruption due to memory allocation when dumping
	 * the statefile.
	 * Theoretically on Sun4u only the kernel data nucleus, kvalloc and
	 * kvseg segments can be contaminated should memory allocations happen
	 * during sddump, which is not supposed to happen after the system
	 * is quiesced. Let's call the kernel pages that tend to be affected
	 * 'sensitive kpages' here. To avoid saving inconsistent pages, we
	 * will allocate some storage space to save the clean sensitive pages
	 * aside before statefile dumping takes place. Since there may not be
	 * much memory left at this stage, the sensitive pages will be
	 * compressed before they are saved into the storage area.
	 */
	if (error = i_cpr_save_sensitive_kpages()) {
		CPR_DEBUG(CPR_DEBUG7,
		    "cpr_dump: save_sensitive_kpages failed!\n");
		return (error);
	}

	/*
	 * since all cpr allocations are done (space for sensitive kpages,
	 * bitmaps, cpr_buf), kas is stable, and now we can accurately
	 * count regular and sensitive kpages.
	 */
	if (error = cpr_write_header(vp)) {
		CPR_DEBUG(CPR_DEBUG7,
		    "cpr_dump: cpr_write_header() failed!\n");
		return (error);
	}

	if (error = i_cpr_write_machdep(vp))
		return (error);

	if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, NULL, NULL))
		return (error);

	if (error = cpr_write_bitmap(vp))
		return (error);

	if (error = cpr_write_statefile(vp)) {
		CPR_DEBUG(CPR_DEBUG7,
		    "cpr_dump: cpr_write_statefile() failed!\n");
		return (error);
	}

	if (error = cpr_write_terminator(vp))
		return (error);

	if (error = cpr_flush_write(vp))
		return (error);

	if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, &cpr_file_bn, vp))
		return (error);
#endif

	return (0);
}


#if defined(__sparc)
/*
 * cpr_xwalk() is called many hundreds of times with a range within kvseg
 * or kvseg_reloc; a page-count from each range is accumulated at arg->pages.
 */
static void
cpr_xwalk(void *arg, void *base, size_t size)
{
	struct cpr_walkinfo *cwip = arg;

	cwip->pages += cpr_count_pages(base, size,
	    cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE);
	cwip->size += size;
	cwip->ranges++;
}

/*
 * cpr_walk() is called many hundreds of times with a range within kvseg
 * or kvseg_reloc; a page-count from each range is accumulated at arg->pages.
 */
static void
cpr_walk(void *arg, void *base, size_t size)
{
	caddr_t addr = base;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start walking the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena instead to find which segments are actually
	 * populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC, cpr_xwalk, arg);
	} else {
		cpr_xwalk(arg, base, size);
	}
}


/*
 * faster scan of kvseg using vmem_walk() to visit
 * allocated ranges.
 */
pgcnt_t
cpr_scan_kvseg(int mapflag, bitfunc_t bitfunc, struct seg *seg)
{
	struct cpr_walkinfo cwinfo;

	bzero(&cwinfo, sizeof (cwinfo));
	cwinfo.mapflag = mapflag;
	cwinfo.bitfunc = bitfunc;

	vmem_walk(heap_arena, VMEM_ALLOC, cpr_walk, &cwinfo);

	if (cpr_debug & CPR_DEBUG7) {
		prom_printf("walked %d sub-ranges, total pages %ld\n",
		    cwinfo.ranges, mmu_btop(cwinfo.size));
		cpr_show_range(seg->s_base, seg->s_size,
		    mapflag, bitfunc, cwinfo.pages);
	}

	return (cwinfo.pages);
}


/*
 * cpr_walk_kpm() is called for every used area within the large
 * segkpm virtual address window. A page-count is accumulated at
 * arg->pages.
 */
static void
cpr_walk_kpm(void *arg, void *base, size_t size)
{
	struct cpr_walkinfo *cwip = arg;

	cwip->pages += cpr_count_pages(base, size,
	    cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE);
	cwip->size += size;
	cwip->ranges++;
}


/*
 * faster scan of segkpm using hat_kpm_walk() to visit only used ranges.
 */
/*ARGSUSED*/
static pgcnt_t
cpr_scan_segkpm(int mapflag, bitfunc_t bitfunc, struct seg *seg)
{
	struct cpr_walkinfo cwinfo;

	if (kpm_enable == 0)
		return (0);

	bzero(&cwinfo, sizeof (cwinfo));
	cwinfo.mapflag = mapflag;
	cwinfo.bitfunc = bitfunc;
	hat_kpm_walk(cpr_walk_kpm, &cwinfo);

	if (cpr_debug & CPR_DEBUG7) {
		prom_printf("walked %d sub-ranges, total pages %ld\n",
		    cwinfo.ranges, mmu_btop(cwinfo.size));
		cpr_show_range(segkpm->s_base, segkpm->s_size,
		    mapflag, bitfunc, cwinfo.pages);
	}

	return (cwinfo.pages);
}


/*
 * Sparsely filled kernel segments are registered in kseg_table for
 * easier lookup. See also block comment for cpr_count_seg_pages.
 */

#define	KSEG_SEG_ADDR	0	/* address of struct seg */
#define	KSEG_PTR_ADDR	1	/* address of pointer to struct seg */

typedef struct {
	struct seg **st_seg;		/* segment pointer or segment address */
	pgcnt_t (*st_fcn)(int, bitfunc_t, struct seg *); /* function to call */
	int st_addrtype;		/* address type in st_seg */
} ksegtbl_entry_t;

ksegtbl_entry_t kseg_table[] = {
	{(struct seg **)&kvseg, cpr_scan_kvseg, KSEG_SEG_ADDR},
	{&segkpm, cpr_scan_segkpm, KSEG_PTR_ADDR},
	{NULL, 0, 0}
};


/*
 * Compare seg with each entry in kseg_table; when there is a match
 * return the entry pointer, otherwise return NULL.
 */
static ksegtbl_entry_t *
cpr_sparse_seg_check(struct seg *seg)
{
	ksegtbl_entry_t *ste = &kseg_table[0];
	struct seg *tseg;

	for (; ste->st_seg; ste++) {
		tseg = (ste->st_addrtype == KSEG_PTR_ADDR) ?
		    *ste->st_seg : (struct seg *)ste->st_seg;

		if (seg == tseg)
			return (ste);
	}

	return ((ksegtbl_entry_t *)NULL);
}

/*
 * Count pages within each kernel segment; call cpr_sparse_seg_check()
 * to find out whether a sparsely filled segment needs special
 * treatment (e.g. kvseg).
 * Todo: A "SEGOP_CPR" like SEGOP_DUMP should be introduced; the cpr
 *	 module shouldn't need to know segment details such as whether
 *	 a segment is sparsely filled (that would make kseg_table obsolete).
 */
pgcnt_t
cpr_count_seg_pages(int mapflag, bitfunc_t bitfunc)
{
	struct seg *segp;
	pgcnt_t pages;
	ksegtbl_entry_t *ste;

	pages = 0;
	for (segp = AS_SEGFIRST(&kas); segp; segp = AS_SEGNEXT(&kas, segp)) {
		if (ste = cpr_sparse_seg_check(segp)) {
			pages += (ste->st_fcn)(mapflag, bitfunc, segp);
		} else {
			pages += cpr_count_pages(segp->s_base,
			    segp->s_size, mapflag, bitfunc, DBG_SHOWRANGE);
		}
	}

	return (pages);
}


/*
 * count kernel pages within kas and any special ranges
 */
pgcnt_t
cpr_count_kpages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t kas_cnt;

	/*
	 * Some pages need to be taken care of differently.
	 * e.g., panicbuf pages of sun4m are not in kas but they need
	 * to be saved. On sun4u, the physical pages of panicbuf are
	 * allocated via prom_retain().
	 */
	kas_cnt = i_cpr_count_special_kpages(mapflag, bitfunc);
	kas_cnt += cpr_count_seg_pages(mapflag, bitfunc);

	CPR_DEBUG(CPR_DEBUG9, "cpr_count_kpages: kas_cnt=%ld\n", kas_cnt);
	CPR_DEBUG(CPR_DEBUG7, "\ncpr_count_kpages: %ld pages, 0x%lx bytes\n",
	    kas_cnt, mmu_ptob(kas_cnt));

	return (kas_cnt);
}


/*
 * Set a bit corresponding to the arg phys page number;
 * returns 0 when the ppn is valid and the corresponding
 * map bit was clear, otherwise returns 1.
 */
int
cpr_setbit(pfn_t ppn, int mapflag)
{
	char *bitmap;
	cbd_t *dp;
	pfn_t rel;
	int clr;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		if (PPN_IN_RANGE(ppn, dp)) {
			bitmap = DESC_TO_MAP(dp, mapflag);
			rel = ppn - dp->cbd_spfn;
			if ((clr = isclr(bitmap, rel)) != 0)
				setbit(bitmap, rel);
			return (clr == 0);
		}
	}

	return (1);
}


/*
 * Clear a bit corresponding to the arg phys page number.
 */
int
cpr_clrbit(pfn_t ppn, int mapflag)
{
	char *bitmap;
	cbd_t *dp;
	pfn_t rel;
	int set;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		if (PPN_IN_RANGE(ppn, dp)) {
			bitmap = DESC_TO_MAP(dp, mapflag);
			rel = ppn - dp->cbd_spfn;
			if ((set = isset(bitmap, rel)) != 0)
				clrbit(bitmap, rel);
			return (set == 0);
		}
	}

	return (1);
}

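/*
 * No-op bit function: never modifies a bitmap and always reports the
 * page as untagged (returns 0), so counting routines can total pages
 * without changing any bitmap state.
 */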
/* ARGSUSED */
int
cpr_nobit(pfn_t ppn, int mapflag)
{
	return (0);
}


/*
 * Lookup a bit corresponding to the arg phys page number.
 */
int
cpr_isset(pfn_t ppn, int mapflag)
{
	char *bitmap;
	cbd_t *dp;
	pfn_t rel;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		if (PPN_IN_RANGE(ppn, dp)) {
			bitmap = DESC_TO_MAP(dp, mapflag);
			rel = ppn - dp->cbd_spfn;
			return (isset(bitmap, rel));
		}
	}

	return (0);
}

/*
 * Go through all pages and pick up any page not caught during the
 * invalidation stage. This is also used to save pages with a cow lock
 * or phys page lock held (non-zero p_lckcnt or p_cowcnt).
 */
static int
cpr_count_upages(int mapflag, bitfunc_t bitfunc)
{
	page_t *pp, *page0;
	pgcnt_t dcnt = 0, tcnt = 0;
	pfn_t pfn;

	page0 = pp = page_first();

	do {
		if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
		    PP_ISFREE(pp) && PP_ISAGED(pp))
			continue;

		pfn = page_pptonum(pp);
		if (pf_is_memory(pfn)) {
			tcnt++;
			if ((*bitfunc)(pfn, mapflag) == 0)
				dcnt++; /* dirty count */
		}
	} while ((pp = page_next(pp)) != page0);

	STAT->cs_upage2statef = dcnt;
	CPR_DEBUG(CPR_DEBUG9, "cpr_count_upages: dirty=%ld total=%ld\n",
	    dcnt, tcnt);
	CPR_DEBUG(CPR_DEBUG7, "cpr_count_upages: %ld pages, 0x%lx bytes\n",
	    dcnt, mmu_ptob(dcnt));
	page0 = NULL; /* for Lint */
	return (dcnt);
}


/*
 * try compressing pages based on cflag,
 * and for DEBUG kernels, verify uncompressed data checksum;
 *
 * this routine replaces common code from
 * i_cpr_compress_and_save() and cpr_compress_and_write()
 */
char *
cpr_compress_pages(cpd_t *dp, pgcnt_t pages, int cflag)
{
	size_t nbytes, clen, len;
	uint32_t test_sum;
	char *datap;

	nbytes = mmu_ptob(pages);

	/*
	 * set length to the original uncompressed data size;
	 * always init cpd_flag to zero
	 */
	dp->cpd_length = nbytes;
	dp->cpd_flag = 0;

#ifdef DEBUG
	/*
	 * Make a copy of the uncompressed data so we can checksum it.
	 * Compress that copy so the checksum works at the other end
	 */
	cprbcopy(CPR->c_mapping_area, cpr_pagecopy, nbytes);
	dp->cpd_usum = checksum32(cpr_pagecopy, nbytes);
	dp->cpd_flag |= CPD_USUM;
	datap = cpr_pagecopy;
#else
	datap = CPR->c_mapping_area;
	dp->cpd_usum = 0;
#endif

	/*
	 * try compressing the raw data to cpr_pagedata;
	 * if there was a size reduction: record the new length,
	 * flag the compression, and point to the compressed data.
	 */
	dp->cpd_csum = 0;
	if (cflag) {
		clen = compress(datap, cpr_pagedata, nbytes);
		if (clen < nbytes) {
			dp->cpd_flag |= CPD_COMPRESS;
			dp->cpd_length = clen;
			datap = cpr_pagedata;
#ifdef DEBUG
			dp->cpd_csum = checksum32(datap, clen);
			dp->cpd_flag |= CPD_CSUM;

			/*
			 * decompress the data back to a scratch area
			 * and compare the new checksum with the original
			 * checksum to verify the compression.
			 */
			bzero(cpr_pagecopy, sizeof (cpr_pagecopy));
			len = decompress(datap, cpr_pagecopy,
			    clen, sizeof (cpr_pagecopy));
			test_sum = checksum32(cpr_pagecopy, len);
			ASSERT(test_sum == dp->cpd_usum);
#endif
		}
	}

	return (datap);
}


/*
 * 1. Prepare cpr page descriptor and write it to file
 * 2. Compress page data and write it out
 */
static int
cpr_compress_and_write(vnode_t *vp, uint_t va, pfn_t pfn, pgcnt_t npg)
{
	int error = 0;
	char *datap;
	cpd_t cpd;	/* cpr page descriptor */
	extern void i_cpr_mapin(caddr_t, uint_t, pfn_t);
	extern void i_cpr_mapout(caddr_t, uint_t);

	i_cpr_mapin(CPR->c_mapping_area, npg, pfn);

	CPR_DEBUG(CPR_DEBUG3, "mapped-in %ld pages, vaddr 0x%p, pfn 0x%lx\n",
	    npg, (void *)CPR->c_mapping_area, pfn);

	/*
	 * Fill cpr page descriptor.
	 */
	cpd.cpd_magic = (uint_t)CPR_PAGE_MAGIC;
	cpd.cpd_pfn = pfn;
	cpd.cpd_pages = npg;

	STAT->cs_dumped_statefsz += mmu_ptob(npg);

	datap = cpr_compress_pages(&cpd, npg, CPR->c_flags & C_COMPRESSING);

	/* Write cpr page descriptor */
	error = cpr_write(vp, (caddr_t)&cpd, sizeof (cpd_t));

	/* Write compressed page data */
	error = cpr_write(vp, (caddr_t)datap, cpd.cpd_length);

	/*
	 * Unmap the pages for tlb and vac flushing
	 */
	i_cpr_mapout(CPR->c_mapping_area, npg);

	if (error) {
		CPR_DEBUG(CPR_DEBUG1,
		    "cpr_compress_and_write: vp 0x%p va 0x%x ", (void *)vp, va);
		CPR_DEBUG(CPR_DEBUG1, "pfn 0x%lx blk %d err %d\n",
		    pfn, cpr_file_bn, error);
	} else {
		cpr_regular_pgs_dumped += npg;
	}

	return (error);
}

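/*
 * Buffered write to the statefile: data is accumulated in cpr_buf and,
 * each time the buffer fills, cpr_buf_blocks blocks are flushed to the
 * statefile via VOP_DUMP() using polled I/O. Returns 0 when the data
 * has been buffered or written, ENOSPC if the statefile or device
 * would be overrun, or an error from the underlying dump routine.
 */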
int
cpr_write(vnode_t *vp, caddr_t buffer, size_t size)
{
	caddr_t fromp = buffer;
	size_t bytes, wbytes;
	int error;

	if (cpr_dev_space == 0) {
		if (vp->v_type == VBLK) {
			cpr_dev_space = cpr_get_devsize(vp->v_rdev);
			ASSERT(cpr_dev_space);
		} else
			cpr_dev_space = 1;	/* not used in this case */
	}

	/*
	 * break the write into multiple parts if the request is large,
	 * calculate count up to buf page boundary, then write it out.
	 * repeat until done.
	 */
	while (size) {
		bytes = MIN(size, cpr_buf_end - cpr_wptr);
		cprbcopy(fromp, cpr_wptr, bytes);
		cpr_wptr += bytes;
		fromp += bytes;
		size -= bytes;
		if (cpr_wptr < cpr_buf_end)
			return (0);	/* buffer not full yet */
		ASSERT(cpr_wptr == cpr_buf_end);

		wbytes = dbtob(cpr_file_bn + cpr_buf_blocks);
		if (vp->v_type == VBLK) {
			if (wbytes > cpr_dev_space)
				return (ENOSPC);
		} else {
			if (wbytes > VTOI(vp)->i_size)
				return (ENOSPC);
		}

		CPR_DEBUG(CPR_DEBUG3,
		    "cpr_write: frmp=%p wptr=%p cnt=%lx...",
		    (void *)fromp, (void *)cpr_wptr, bytes);
		/*
		 * cross check, this should not happen!
		 */
		if (cpr_disk_writes_ok == 0) {
			prom_printf("cpr_write: disk write too early!\n");
			return (EINVAL);
		}

		do_polled_io = 1;
		error = VOP_DUMP(vp, cpr_buf, cpr_file_bn, cpr_buf_blocks,
		    NULL);
		do_polled_io = 0;
		CPR_DEBUG(CPR_DEBUG3, "done\n");

		STAT->cs_real_statefsz += cpr_buf_size;

		if (error) {
			cpr_err(CE_WARN, "cpr_write error %d", error);
			return (error);
		}
		cpr_file_bn += cpr_buf_blocks;	/* Increment block count */
		cpr_wptr = cpr_buf;		/* back to top of buffer */
	}
	return (0);
}

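/*
 * Flush whatever remains in cpr_buf to the statefile, rounding the
 * residual up to whole disk blocks for VOP_DUMP().
 */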
int
cpr_flush_write(vnode_t *vp)
{
	int nblk;
	int error;

	/*
	 * Calculate remaining blocks in buffer, rounded up to nearest
	 * disk block
	 */
	nblk = btod(cpr_wptr - cpr_buf);

	do_polled_io = 1;
	error = VOP_DUMP(vp, (caddr_t)cpr_buf, cpr_file_bn, nblk, NULL);
	do_polled_io = 0;

	cpr_file_bn += nblk;
	if (error)
		CPR_DEBUG(CPR_DEBUG2, "cpr_flush_write: error (%d)\n",
		    error);
	return (error);
}

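/*
 * Zero the regular and volatile bitmaps for each bitmap descriptor;
 * a single bzero() of cbd_size * 2 bytes starting at the regular
 * bitmap clears both maps.
 */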
void
cpr_clear_bitmaps(void)
{
	cbd_t *dp;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		bzero((void *)dp->cbd_reg_bitmap,
		    (size_t)dp->cbd_size * 2);
	}
	CPR_DEBUG(CPR_DEBUG7, "\ncleared reg and vlt bitmaps\n");
}

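/*
 * Scan the regular bitmaps for runs of up to CPR_MAXCONTIG contiguous
 * tagged pages. Depending on flag, each run is counted
 * (STORAGE_DESC_ALLOC), compressed and saved to the storage area
 * (SAVE_TO_STORAGE), or compressed and written to the statefile
 * (WRITE_TO_STATEFILE). Returns the chunk count for STORAGE_DESC_ALLOC,
 * otherwise 0 or an error.
 */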
int
cpr_contig_pages(vnode_t *vp, int flag)
{
	int chunks = 0, error = 0;
	pgcnt_t i, j, totbit;
	pfn_t spfn;
	cbd_t *dp;
	uint_t spin_cnt = 0;
	extern int i_cpr_compress_and_save();

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		spfn = dp->cbd_spfn;
		totbit = BTOb(dp->cbd_size);
		i = 0; /* Beginning of bitmap */
		j = 0;
		while (i < totbit) {
			while ((j < CPR_MAXCONTIG) && ((j + i) < totbit)) {
				if (isset((char *)dp->cbd_reg_bitmap, j+i))
					j++;
				else /* not contiguous anymore */
					break;
			}

			if (j) {
				chunks++;
				if (flag == SAVE_TO_STORAGE) {
					error = i_cpr_compress_and_save(
					    chunks, spfn + i, j);
					if (error)
						return (error);
				} else if (flag == WRITE_TO_STATEFILE) {
					error = cpr_compress_and_write(vp, 0,
					    spfn + i, j);
					if (error)
						return (error);
					else {
						spin_cnt++;
						if ((spin_cnt & 0x5F) == 1)
							cpr_spinning_bar();
					}
				}
			}

			i += j;
			if (j != CPR_MAXCONTIG) {
				/* Stopped on a non-tagged page */
				i++;
			}

			j = 0;
		}
	}

	if (flag == STORAGE_DESC_ALLOC)
		return (chunks);
	else
		return (0);
}

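/*
 * Debug helper: print a one-line summary of a scanned virtual range,
 * which bitmap it applies to, and how many pages were affected.
 */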
void
cpr_show_range(caddr_t vaddr, size_t size,
    int mapflag, bitfunc_t bitfunc, pgcnt_t count)
{
	char *action, *bname;

	bname = (mapflag == REGULAR_BITMAP) ? "regular" : "volatile";
	if (bitfunc == cpr_setbit)
		action = "tag";
	else if (bitfunc == cpr_clrbit)
		action = "untag";
	else
		action = "none";
	prom_printf("range (0x%p, 0x%p), %s bitmap, %s %ld\n",
	    (void *)vaddr, (void *)(vaddr + size), bname, action, count);
}

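/*
 * Walk [sva, sva + size) one page at a time, applying bitfunc to each
 * pfn that maps to real memory; returns the number of pages for which
 * bitfunc returned 0 (e.g. pages newly tagged by cpr_setbit).
 */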
pgcnt_t
cpr_count_pages(caddr_t sva, size_t size,
    int mapflag, bitfunc_t bitfunc, int showrange)
{
	caddr_t va, eva;
	pfn_t pfn;
	pgcnt_t count = 0;

	eva = sva + PAGE_ROUNDUP(size);
	for (va = sva; va < eva; va += MMU_PAGESIZE) {
		pfn = va_to_pfn(va);
		if (pfn != PFN_INVALID && pf_is_memory(pfn)) {
			if ((*bitfunc)(pfn, mapflag) == 0)
				count++;
		}
	}

	if ((cpr_debug & CPR_DEBUG7) && showrange == DBG_SHOWRANGE)
		cpr_show_range(sva, size, mapflag, bitfunc, count);

	return (count);
}

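/*
 * Count (and tag or untag, depending on bitfunc) the pages holding cpr's
 * own transient data: cpr_buf, cpr_pagedata and the sensitive-kpage
 * storage area. These pages are not needed once the statefile has been
 * written.
 */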
pgcnt_t
cpr_count_volatile_pages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t count = 0;

	if (cpr_buf) {
		count += cpr_count_pages(cpr_buf, cpr_buf_size,
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	if (cpr_pagedata) {
		count += cpr_count_pages(cpr_pagedata, cpr_pagedata_size,
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	count += i_cpr_count_storage_pages(mapflag, bitfunc);

	CPR_DEBUG(CPR_DEBUG7, "cpr_count_vpages: %ld pages, 0x%lx bytes\n",
	    count, mmu_ptob(count));
	return (count);
}

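/*
 * Write all remaining tagged (regular) pages to the statefile;
 * cpr_regular_pgs_dumped tracks how many were written.
 */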
static int
cpr_dump_regular_pages(vnode_t *vp)
{
	int error;

	cpr_regular_pgs_dumped = 0;
	error = cpr_contig_pages(vp, WRITE_TO_STATEFILE);
	if (!error)
		CPR_DEBUG(CPR_DEBUG7, "cpr_dump_regular_pages() done.\n");
	return (error);
}
#endif