10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 53290Sjohansen * Common Development and Distribution License (the "License"). 63290Sjohansen * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 223446Smrj * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 270Sstevel@tonic-gate 280Sstevel@tonic-gate /* 290Sstevel@tonic-gate * Fill in and write out the cpr state file 300Sstevel@tonic-gate * 1. Allocate and write headers, ELF and cpr dump header 310Sstevel@tonic-gate * 2. Allocate bitmaps according to phys_install 320Sstevel@tonic-gate * 3. Tag kernel pages into corresponding bitmap 330Sstevel@tonic-gate * 4. Write bitmaps to state file 340Sstevel@tonic-gate * 5. Write actual physical page data to state file 350Sstevel@tonic-gate */ 360Sstevel@tonic-gate 370Sstevel@tonic-gate #include <sys/types.h> 380Sstevel@tonic-gate #include <sys/systm.h> 390Sstevel@tonic-gate #include <sys/vm.h> 400Sstevel@tonic-gate #include <sys/memlist.h> 410Sstevel@tonic-gate #include <sys/kmem.h> 420Sstevel@tonic-gate #include <sys/vnode.h> 430Sstevel@tonic-gate #include <sys/fs/ufs_inode.h> 440Sstevel@tonic-gate #include <sys/errno.h> 450Sstevel@tonic-gate #include <sys/cmn_err.h> 460Sstevel@tonic-gate #include <sys/debug.h> 470Sstevel@tonic-gate #include <vm/page.h> 480Sstevel@tonic-gate #include <vm/seg.h> 490Sstevel@tonic-gate #include <vm/seg_kmem.h> 500Sstevel@tonic-gate #include <vm/seg_kpm.h> 510Sstevel@tonic-gate #include <vm/hat.h> 520Sstevel@tonic-gate #include <sys/cpr.h> 530Sstevel@tonic-gate #include <sys/conf.h> 540Sstevel@tonic-gate #include <sys/ddi.h> 550Sstevel@tonic-gate #include <sys/panic.h> 560Sstevel@tonic-gate #include <sys/thread.h> 575295Srandyf #include <sys/note.h> 580Sstevel@tonic-gate 590Sstevel@tonic-gate /* Local defines and variables */ 600Sstevel@tonic-gate #define BTOb(bytes) ((bytes) << 3) /* Bytes to bits, log2(NBBY) */ 610Sstevel@tonic-gate #define bTOB(bits) ((bits) >> 3) /* bits to Bytes, log2(NBBY) */ 620Sstevel@tonic-gate 635295Srandyf #if defined(__sparc) 640Sstevel@tonic-gate static uint_t cpr_pages_tobe_dumped; 650Sstevel@tonic-gate static uint_t cpr_regular_pgs_dumped; 660Sstevel@tonic-gate static int cpr_dump_regular_pages(vnode_t *); 670Sstevel@tonic-gate static int cpr_count_upages(int, bitfunc_t); 680Sstevel@tonic-gate static int cpr_compress_and_write(vnode_t *, uint_t, pfn_t, pgcnt_t); 695295Srandyf #endif 705295Srandyf 710Sstevel@tonic-gate int cpr_flush_write(vnode_t *); 720Sstevel@tonic-gate 730Sstevel@tonic-gate int cpr_contig_pages(vnode_t *, int); 740Sstevel@tonic-gate 750Sstevel@tonic-gate void cpr_clear_bitmaps(); 760Sstevel@tonic-gate 770Sstevel@tonic-gate extern size_t cpr_get_devsize(dev_t); 780Sstevel@tonic-gate extern int i_cpr_dump_setup(vnode_t *); 790Sstevel@tonic-gate extern int i_cpr_blockzero(char *, char **, int *, vnode_t *); 800Sstevel@tonic-gate extern int cpr_test_mode; 815295Srandyf int cpr_setbit(pfn_t, int); 825295Srandyf int cpr_clrbit(pfn_t, int); 830Sstevel@tonic-gate 840Sstevel@tonic-gate ctrm_t cpr_term; 850Sstevel@tonic-gate 860Sstevel@tonic-gate char *cpr_buf, *cpr_buf_end; 870Sstevel@tonic-gate int cpr_buf_blocks; /* size of cpr_buf in blocks */ 880Sstevel@tonic-gate size_t cpr_buf_size; /* size of cpr_buf in bytes */ 890Sstevel@tonic-gate size_t cpr_bitmap_size; 900Sstevel@tonic-gate int cpr_nbitmaps; 910Sstevel@tonic-gate 920Sstevel@tonic-gate char *cpr_pagedata; /* page buffer for compression / tmp copy */ 930Sstevel@tonic-gate size_t cpr_pagedata_size; /* page buffer size in bytes */ 940Sstevel@tonic-gate 955295Srandyf #if defined(__sparc) 960Sstevel@tonic-gate static char *cpr_wptr; /* keep track of where to write to next */ 970Sstevel@tonic-gate static int cpr_file_bn; /* cpr state-file block offset */ 980Sstevel@tonic-gate static int cpr_disk_writes_ok; 990Sstevel@tonic-gate static size_t cpr_dev_space = 0; 1005295Srandyf #endif 1010Sstevel@tonic-gate 1020Sstevel@tonic-gate char cpr_pagecopy[CPR_MAXCONTIG * MMU_PAGESIZE]; 1030Sstevel@tonic-gate 1045295Srandyf #if defined(__sparc) 1050Sstevel@tonic-gate /* 1060Sstevel@tonic-gate * On some platforms bcopy may modify the thread structure 1070Sstevel@tonic-gate * during bcopy (eg, to prevent cpu migration). If the 1080Sstevel@tonic-gate * range we are currently writing out includes our own 1090Sstevel@tonic-gate * thread structure then it will be snapshotted by bcopy 1100Sstevel@tonic-gate * including those modified members - and the updates made 1110Sstevel@tonic-gate * on exit from bcopy will no longer be seen when we later 1120Sstevel@tonic-gate * restore the mid-bcopy kthread_t. So if the range we 1130Sstevel@tonic-gate * need to copy overlaps with our thread structure we will 1140Sstevel@tonic-gate * use a simple byte copy. 1150Sstevel@tonic-gate */ 1160Sstevel@tonic-gate void 1170Sstevel@tonic-gate cprbcopy(void *from, void *to, size_t bytes) 1180Sstevel@tonic-gate { 1190Sstevel@tonic-gate extern int curthreadremapped; 1200Sstevel@tonic-gate caddr_t kthrend; 1210Sstevel@tonic-gate 1220Sstevel@tonic-gate kthrend = (caddr_t)curthread + sizeof (kthread_t) - 1; 1230Sstevel@tonic-gate if (curthreadremapped || (kthrend >= (caddr_t)from && 1240Sstevel@tonic-gate kthrend < (caddr_t)from + bytes + sizeof (kthread_t) - 1)) { 1250Sstevel@tonic-gate caddr_t src = from, dst = to; 1260Sstevel@tonic-gate 1270Sstevel@tonic-gate while (bytes-- > 0) 1280Sstevel@tonic-gate *dst++ = *src++; 1290Sstevel@tonic-gate } else { 1300Sstevel@tonic-gate bcopy(from, to, bytes); 1310Sstevel@tonic-gate } 1320Sstevel@tonic-gate } 1330Sstevel@tonic-gate 1340Sstevel@tonic-gate /* 1350Sstevel@tonic-gate * Allocate pages for buffers used in writing out the statefile 1360Sstevel@tonic-gate */ 1370Sstevel@tonic-gate static int 1380Sstevel@tonic-gate cpr_alloc_bufs(void) 1390Sstevel@tonic-gate { 1400Sstevel@tonic-gate char *allocerr = "Unable to allocate memory for cpr buffer"; 1410Sstevel@tonic-gate size_t size; 1420Sstevel@tonic-gate 1430Sstevel@tonic-gate /* 1440Sstevel@tonic-gate * set the cpr write buffer size to at least the historic 1450Sstevel@tonic-gate * size (128k) or large enough to store the both the early 1460Sstevel@tonic-gate * set of statefile structures (well under 0x800) plus the 1470Sstevel@tonic-gate * bitmaps, and roundup to the next pagesize. 1480Sstevel@tonic-gate */ 1490Sstevel@tonic-gate size = PAGE_ROUNDUP(dbtob(4) + cpr_bitmap_size); 1500Sstevel@tonic-gate cpr_buf_size = MAX(size, CPRBUFSZ); 1510Sstevel@tonic-gate cpr_buf_blocks = btodb(cpr_buf_size); 1520Sstevel@tonic-gate cpr_buf = kmem_alloc(cpr_buf_size, KM_NOSLEEP); 1530Sstevel@tonic-gate if (cpr_buf == NULL) { 1540Sstevel@tonic-gate cpr_err(CE_WARN, allocerr); 1550Sstevel@tonic-gate return (ENOMEM); 1560Sstevel@tonic-gate } 1570Sstevel@tonic-gate cpr_buf_end = cpr_buf + cpr_buf_size; 1580Sstevel@tonic-gate 1590Sstevel@tonic-gate cpr_pagedata_size = mmu_ptob(CPR_MAXCONTIG + 1); 1600Sstevel@tonic-gate cpr_pagedata = kmem_alloc(cpr_pagedata_size, KM_NOSLEEP); 1610Sstevel@tonic-gate if (cpr_pagedata == NULL) { 1620Sstevel@tonic-gate kmem_free(cpr_buf, cpr_buf_size); 1630Sstevel@tonic-gate cpr_buf = NULL; 1640Sstevel@tonic-gate cpr_err(CE_WARN, allocerr); 1650Sstevel@tonic-gate return (ENOMEM); 1660Sstevel@tonic-gate } 1670Sstevel@tonic-gate 1680Sstevel@tonic-gate return (0); 1690Sstevel@tonic-gate } 1700Sstevel@tonic-gate 1710Sstevel@tonic-gate 1720Sstevel@tonic-gate /* 1730Sstevel@tonic-gate * Set bitmap size in bytes based on phys_install. 1740Sstevel@tonic-gate */ 1750Sstevel@tonic-gate void 1760Sstevel@tonic-gate cpr_set_bitmap_size(void) 1770Sstevel@tonic-gate { 1780Sstevel@tonic-gate struct memlist *pmem; 1790Sstevel@tonic-gate size_t size = 0; 1800Sstevel@tonic-gate 1810Sstevel@tonic-gate memlist_read_lock(); 1820Sstevel@tonic-gate for (pmem = phys_install; pmem; pmem = pmem->next) 1830Sstevel@tonic-gate size += pmem->size; 1840Sstevel@tonic-gate memlist_read_unlock(); 1850Sstevel@tonic-gate cpr_bitmap_size = BITMAP_BYTES(size); 1860Sstevel@tonic-gate } 1870Sstevel@tonic-gate 1880Sstevel@tonic-gate 1890Sstevel@tonic-gate /* 1900Sstevel@tonic-gate * CPR dump header contains the following information: 1910Sstevel@tonic-gate * 1. header magic -- unique to cpr state file 1920Sstevel@tonic-gate * 2. kernel return pc & ppn for resume 1930Sstevel@tonic-gate * 3. current thread info 1940Sstevel@tonic-gate * 4. debug level and test mode 1950Sstevel@tonic-gate * 5. number of bitmaps allocated 1960Sstevel@tonic-gate * 6. number of page records 1970Sstevel@tonic-gate */ 1980Sstevel@tonic-gate static int 1990Sstevel@tonic-gate cpr_write_header(vnode_t *vp) 2000Sstevel@tonic-gate { 2010Sstevel@tonic-gate extern ushort_t cpr_mach_type; 2020Sstevel@tonic-gate struct cpr_dump_desc cdump; 2030Sstevel@tonic-gate pgcnt_t bitmap_pages; 2040Sstevel@tonic-gate pgcnt_t kpages, vpages, upages; 2055295Srandyf pgcnt_t cpr_count_kpages(int mapflag, bitfunc_t bitfunc); 2060Sstevel@tonic-gate 2070Sstevel@tonic-gate cdump.cdd_magic = (uint_t)CPR_DUMP_MAGIC; 2080Sstevel@tonic-gate cdump.cdd_version = CPR_VERSION; 2090Sstevel@tonic-gate cdump.cdd_machine = cpr_mach_type; 2100Sstevel@tonic-gate cdump.cdd_debug = cpr_debug; 2110Sstevel@tonic-gate cdump.cdd_test_mode = cpr_test_mode; 2120Sstevel@tonic-gate cdump.cdd_bitmaprec = cpr_nbitmaps; 2130Sstevel@tonic-gate 2140Sstevel@tonic-gate cpr_clear_bitmaps(); 2150Sstevel@tonic-gate 2160Sstevel@tonic-gate /* 2170Sstevel@tonic-gate * Remember how many pages we plan to save to statefile. 2180Sstevel@tonic-gate * This information will be used for sanity checks. 2190Sstevel@tonic-gate * Untag those pages that will not be saved to statefile. 2200Sstevel@tonic-gate */ 2210Sstevel@tonic-gate kpages = cpr_count_kpages(REGULAR_BITMAP, cpr_setbit); 2220Sstevel@tonic-gate vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit); 2230Sstevel@tonic-gate upages = cpr_count_upages(REGULAR_BITMAP, cpr_setbit); 2240Sstevel@tonic-gate cdump.cdd_dumppgsize = kpages - vpages + upages; 2250Sstevel@tonic-gate cpr_pages_tobe_dumped = cdump.cdd_dumppgsize; 2263446Smrj CPR_DEBUG(CPR_DEBUG7, 2270Sstevel@tonic-gate "\ncpr_write_header: kpages %ld - vpages %ld + upages %ld = %d\n", 2283446Smrj kpages, vpages, upages, cdump.cdd_dumppgsize); 2290Sstevel@tonic-gate 2300Sstevel@tonic-gate /* 2310Sstevel@tonic-gate * Some pages contain volatile data (cpr_buf and storage area for 2320Sstevel@tonic-gate * sensitive kpages), which are no longer needed after the statefile 2330Sstevel@tonic-gate * is dumped to disk. We have already untagged them from regular 2340Sstevel@tonic-gate * bitmaps. Now tag them into the volatile bitmaps. The pages in 2350Sstevel@tonic-gate * volatile bitmaps will be claimed during resume, and the resumed 2360Sstevel@tonic-gate * kernel will free them. 2370Sstevel@tonic-gate */ 2380Sstevel@tonic-gate (void) cpr_count_volatile_pages(VOLATILE_BITMAP, cpr_setbit); 2390Sstevel@tonic-gate 2400Sstevel@tonic-gate bitmap_pages = mmu_btopr(cpr_bitmap_size); 2410Sstevel@tonic-gate 2420Sstevel@tonic-gate /* 2430Sstevel@tonic-gate * Export accurate statefile size for statefile allocation retry. 2440Sstevel@tonic-gate * statefile_size = all the headers + total pages + 2450Sstevel@tonic-gate * number of pages used by the bitmaps. 2460Sstevel@tonic-gate * Roundup will be done in the file allocation code. 2470Sstevel@tonic-gate */ 2480Sstevel@tonic-gate STAT->cs_nocomp_statefsz = sizeof (cdd_t) + sizeof (cmd_t) + 2495295Srandyf (sizeof (cbd_t) * cdump.cdd_bitmaprec) + 2505295Srandyf (sizeof (cpd_t) * cdump.cdd_dumppgsize) + 2515295Srandyf mmu_ptob(cdump.cdd_dumppgsize + bitmap_pages); 2520Sstevel@tonic-gate 2530Sstevel@tonic-gate /* 2540Sstevel@tonic-gate * If the estimated statefile is not big enough, 2550Sstevel@tonic-gate * go retry now to save un-necessary operations. 2560Sstevel@tonic-gate */ 2570Sstevel@tonic-gate if (!(CPR->c_flags & C_COMPRESSING) && 2585295Srandyf (STAT->cs_nocomp_statefsz > STAT->cs_est_statefsz)) { 2593446Smrj if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG7)) 2605295Srandyf prom_printf("cpr_write_header: " 2615295Srandyf "STAT->cs_nocomp_statefsz > " 2625295Srandyf "STAT->cs_est_statefsz\n"); 2630Sstevel@tonic-gate return (ENOSPC); 2640Sstevel@tonic-gate } 2650Sstevel@tonic-gate 2660Sstevel@tonic-gate /* now write cpr dump descriptor */ 2670Sstevel@tonic-gate return (cpr_write(vp, (caddr_t)&cdump, sizeof (cdd_t))); 2680Sstevel@tonic-gate } 2690Sstevel@tonic-gate 2700Sstevel@tonic-gate 2710Sstevel@tonic-gate /* 2720Sstevel@tonic-gate * CPR dump tail record contains the following information: 2730Sstevel@tonic-gate * 1. header magic -- unique to cpr state file 2740Sstevel@tonic-gate * 2. all misc info that needs to be passed to cprboot or resumed kernel 2750Sstevel@tonic-gate */ 2760Sstevel@tonic-gate static int 2770Sstevel@tonic-gate cpr_write_terminator(vnode_t *vp) 2780Sstevel@tonic-gate { 2790Sstevel@tonic-gate cpr_term.magic = (uint_t)CPR_TERM_MAGIC; 2800Sstevel@tonic-gate cpr_term.va = (cpr_ptr)&cpr_term; 2810Sstevel@tonic-gate cpr_term.pfn = (cpr_ext)va_to_pfn(&cpr_term); 2820Sstevel@tonic-gate 2830Sstevel@tonic-gate /* count the last one (flush) */ 2840Sstevel@tonic-gate cpr_term.real_statef_size = STAT->cs_real_statefsz + 2855295Srandyf btod(cpr_wptr - cpr_buf) * DEV_BSIZE; 2860Sstevel@tonic-gate 2873446Smrj CPR_DEBUG(CPR_DEBUG9, "cpr_dump: Real Statefile Size: %ld\n", 2885295Srandyf STAT->cs_real_statefsz); 2890Sstevel@tonic-gate 2900Sstevel@tonic-gate cpr_tod_get(&cpr_term.tm_shutdown); 2910Sstevel@tonic-gate 2920Sstevel@tonic-gate return (cpr_write(vp, (caddr_t)&cpr_term, sizeof (cpr_term))); 2930Sstevel@tonic-gate } 2940Sstevel@tonic-gate 2950Sstevel@tonic-gate /* 2960Sstevel@tonic-gate * Write bitmap descriptor array, followed by merged bitmaps. 2970Sstevel@tonic-gate */ 2980Sstevel@tonic-gate static int 2990Sstevel@tonic-gate cpr_write_bitmap(vnode_t *vp) 3000Sstevel@tonic-gate { 3010Sstevel@tonic-gate char *rmap, *vmap, *dst, *tail; 3020Sstevel@tonic-gate size_t size, bytes; 3030Sstevel@tonic-gate cbd_t *dp; 3040Sstevel@tonic-gate int err; 3050Sstevel@tonic-gate 3060Sstevel@tonic-gate dp = CPR->c_bmda; 3070Sstevel@tonic-gate if (err = cpr_write(vp, (caddr_t)dp, cpr_nbitmaps * sizeof (*dp))) 3080Sstevel@tonic-gate return (err); 3090Sstevel@tonic-gate 3100Sstevel@tonic-gate /* 3110Sstevel@tonic-gate * merge regular and volatile bitmaps into tmp space 3120Sstevel@tonic-gate * and write to disk 3130Sstevel@tonic-gate */ 3140Sstevel@tonic-gate for (; dp->cbd_size; dp++) { 3150Sstevel@tonic-gate rmap = (char *)dp->cbd_reg_bitmap; 3160Sstevel@tonic-gate vmap = (char *)dp->cbd_vlt_bitmap; 3170Sstevel@tonic-gate for (size = dp->cbd_size; size; size -= bytes) { 3180Sstevel@tonic-gate bytes = min(size, sizeof (cpr_pagecopy)); 3190Sstevel@tonic-gate tail = &cpr_pagecopy[bytes]; 3200Sstevel@tonic-gate for (dst = cpr_pagecopy; dst < tail; dst++) 3210Sstevel@tonic-gate *dst = *rmap++ | *vmap++; 3220Sstevel@tonic-gate if (err = cpr_write(vp, cpr_pagecopy, bytes)) 3230Sstevel@tonic-gate break; 3240Sstevel@tonic-gate } 3250Sstevel@tonic-gate } 3260Sstevel@tonic-gate 3270Sstevel@tonic-gate return (err); 3280Sstevel@tonic-gate } 3290Sstevel@tonic-gate 3300Sstevel@tonic-gate 3310Sstevel@tonic-gate static int 3320Sstevel@tonic-gate cpr_write_statefile(vnode_t *vp) 3330Sstevel@tonic-gate { 3340Sstevel@tonic-gate uint_t error = 0; 3350Sstevel@tonic-gate extern int i_cpr_check_pgs_dumped(); 3360Sstevel@tonic-gate void flush_windows(void); 3370Sstevel@tonic-gate pgcnt_t spages; 3380Sstevel@tonic-gate char *str; 3390Sstevel@tonic-gate 3400Sstevel@tonic-gate flush_windows(); 3410Sstevel@tonic-gate 3420Sstevel@tonic-gate /* 3430Sstevel@tonic-gate * to get an accurate view of kas, we need to untag sensitive 3440Sstevel@tonic-gate * pages *before* dumping them because the disk driver makes 3450Sstevel@tonic-gate * allocations and changes kas along the way. The remaining 3460Sstevel@tonic-gate * pages referenced in the bitmaps are dumped out later as 3470Sstevel@tonic-gate * regular kpages. 3480Sstevel@tonic-gate */ 3490Sstevel@tonic-gate str = "cpr_write_statefile:"; 3500Sstevel@tonic-gate spages = i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_clrbit); 3513446Smrj CPR_DEBUG(CPR_DEBUG7, "%s untag %ld sens pages\n", str, spages); 3520Sstevel@tonic-gate 3530Sstevel@tonic-gate /* 3540Sstevel@tonic-gate * now it's OK to call a driver that makes allocations 3550Sstevel@tonic-gate */ 3560Sstevel@tonic-gate cpr_disk_writes_ok = 1; 3570Sstevel@tonic-gate 3580Sstevel@tonic-gate /* 3590Sstevel@tonic-gate * now write out the clean sensitive kpages 3600Sstevel@tonic-gate * according to the sensitive descriptors 3610Sstevel@tonic-gate */ 3620Sstevel@tonic-gate error = i_cpr_dump_sensitive_kpages(vp); 3630Sstevel@tonic-gate if (error) { 3643446Smrj CPR_DEBUG(CPR_DEBUG7, 3653446Smrj "%s cpr_dump_sensitive_kpages() failed!\n", str); 3660Sstevel@tonic-gate return (error); 3670Sstevel@tonic-gate } 3680Sstevel@tonic-gate 3690Sstevel@tonic-gate /* 3700Sstevel@tonic-gate * cpr_dump_regular_pages() counts cpr_regular_pgs_dumped 3710Sstevel@tonic-gate */ 3720Sstevel@tonic-gate error = cpr_dump_regular_pages(vp); 3730Sstevel@tonic-gate if (error) { 3743446Smrj CPR_DEBUG(CPR_DEBUG7, 3753446Smrj "%s cpr_dump_regular_pages() failed!\n", str); 3760Sstevel@tonic-gate return (error); 3770Sstevel@tonic-gate } 3780Sstevel@tonic-gate 3790Sstevel@tonic-gate /* 3800Sstevel@tonic-gate * sanity check to verify the right number of pages were dumped 3810Sstevel@tonic-gate */ 3820Sstevel@tonic-gate error = i_cpr_check_pgs_dumped(cpr_pages_tobe_dumped, 3830Sstevel@tonic-gate cpr_regular_pgs_dumped); 3840Sstevel@tonic-gate 3850Sstevel@tonic-gate if (error) { 3863446Smrj prom_printf("\n%s page count mismatch!\n", str); 3870Sstevel@tonic-gate #ifdef DEBUG 3880Sstevel@tonic-gate if (cpr_test_mode) 3890Sstevel@tonic-gate debug_enter(NULL); 3900Sstevel@tonic-gate #endif 3910Sstevel@tonic-gate } 3920Sstevel@tonic-gate 3930Sstevel@tonic-gate return (error); 3940Sstevel@tonic-gate } 3955295Srandyf #endif 3960Sstevel@tonic-gate 3970Sstevel@tonic-gate 3980Sstevel@tonic-gate /* 3990Sstevel@tonic-gate * creates the CPR state file, the following sections are 4000Sstevel@tonic-gate * written out in sequence: 4010Sstevel@tonic-gate * - writes the cpr dump header 4020Sstevel@tonic-gate * - writes the memory usage bitmaps 4030Sstevel@tonic-gate * - writes the platform dependent info 4040Sstevel@tonic-gate * - writes the remaining user pages 4050Sstevel@tonic-gate * - writes the kernel pages 4060Sstevel@tonic-gate */ 4075295Srandyf #if defined(__x86) 4085295Srandyf _NOTE(ARGSUSED(0)) 4095295Srandyf #endif 4100Sstevel@tonic-gate int 4110Sstevel@tonic-gate cpr_dump(vnode_t *vp) 4120Sstevel@tonic-gate { 4135295Srandyf #if defined(__sparc) 4140Sstevel@tonic-gate int error; 4150Sstevel@tonic-gate 4160Sstevel@tonic-gate if (cpr_buf == NULL) { 4170Sstevel@tonic-gate ASSERT(cpr_pagedata == NULL); 4180Sstevel@tonic-gate if (error = cpr_alloc_bufs()) 4190Sstevel@tonic-gate return (error); 4200Sstevel@tonic-gate } 4210Sstevel@tonic-gate /* point to top of internal buffer */ 4220Sstevel@tonic-gate cpr_wptr = cpr_buf; 4230Sstevel@tonic-gate 4240Sstevel@tonic-gate /* initialize global variables used by the write operation */ 4250Sstevel@tonic-gate cpr_file_bn = cpr_statefile_offset(); 4260Sstevel@tonic-gate cpr_dev_space = 0; 4270Sstevel@tonic-gate 4280Sstevel@tonic-gate /* allocate bitmaps */ 4290Sstevel@tonic-gate if (CPR->c_bmda == NULL) { 4300Sstevel@tonic-gate if (error = i_cpr_alloc_bitmaps()) { 4310Sstevel@tonic-gate cpr_err(CE_WARN, "cannot allocate bitmaps"); 4320Sstevel@tonic-gate return (error); 4330Sstevel@tonic-gate } 4340Sstevel@tonic-gate } 4350Sstevel@tonic-gate 4360Sstevel@tonic-gate if (error = i_cpr_prom_pages(CPR_PROM_SAVE)) 4370Sstevel@tonic-gate return (error); 4380Sstevel@tonic-gate 4390Sstevel@tonic-gate if (error = i_cpr_dump_setup(vp)) 4400Sstevel@tonic-gate return (error); 4410Sstevel@tonic-gate 4420Sstevel@tonic-gate /* 4430Sstevel@tonic-gate * set internal cross checking; we dont want to call 4440Sstevel@tonic-gate * a disk driver that makes allocations until after 4450Sstevel@tonic-gate * sensitive pages are saved 4460Sstevel@tonic-gate */ 4470Sstevel@tonic-gate cpr_disk_writes_ok = 0; 4480Sstevel@tonic-gate 4490Sstevel@tonic-gate /* 4500Sstevel@tonic-gate * 1253112: heap corruption due to memory allocation when dumpping 4510Sstevel@tonic-gate * statefile. 4520Sstevel@tonic-gate * Theoretically on Sun4u only the kernel data nucleus, kvalloc and 4530Sstevel@tonic-gate * kvseg segments can be contaminated should memory allocations happen 4540Sstevel@tonic-gate * during sddump, which is not supposed to happen after the system 4550Sstevel@tonic-gate * is quiesced. Let's call the kernel pages that tend to be affected 4560Sstevel@tonic-gate * 'sensitive kpages' here. To avoid saving inconsistent pages, we 4570Sstevel@tonic-gate * will allocate some storage space to save the clean sensitive pages 4580Sstevel@tonic-gate * aside before statefile dumping takes place. Since there may not be 4590Sstevel@tonic-gate * much memory left at this stage, the sensitive pages will be 4600Sstevel@tonic-gate * compressed before they are saved into the storage area. 4610Sstevel@tonic-gate */ 4620Sstevel@tonic-gate if (error = i_cpr_save_sensitive_kpages()) { 4633446Smrj CPR_DEBUG(CPR_DEBUG7, 4643446Smrj "cpr_dump: save_sensitive_kpages failed!\n"); 4650Sstevel@tonic-gate return (error); 4660Sstevel@tonic-gate } 4670Sstevel@tonic-gate 4680Sstevel@tonic-gate /* 4690Sstevel@tonic-gate * since all cpr allocations are done (space for sensitive kpages, 4700Sstevel@tonic-gate * bitmaps, cpr_buf), kas is stable, and now we can accurately 4710Sstevel@tonic-gate * count regular and sensitive kpages. 4720Sstevel@tonic-gate */ 4730Sstevel@tonic-gate if (error = cpr_write_header(vp)) { 4743446Smrj CPR_DEBUG(CPR_DEBUG7, 4753446Smrj "cpr_dump: cpr_write_header() failed!\n"); 4760Sstevel@tonic-gate return (error); 4770Sstevel@tonic-gate } 4780Sstevel@tonic-gate 4790Sstevel@tonic-gate if (error = i_cpr_write_machdep(vp)) 4800Sstevel@tonic-gate return (error); 4810Sstevel@tonic-gate 4820Sstevel@tonic-gate if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, NULL, NULL)) 4830Sstevel@tonic-gate return (error); 4840Sstevel@tonic-gate 4850Sstevel@tonic-gate if (error = cpr_write_bitmap(vp)) 4860Sstevel@tonic-gate return (error); 4870Sstevel@tonic-gate 4880Sstevel@tonic-gate if (error = cpr_write_statefile(vp)) { 4893446Smrj CPR_DEBUG(CPR_DEBUG7, 4903446Smrj "cpr_dump: cpr_write_statefile() failed!\n"); 4910Sstevel@tonic-gate return (error); 4920Sstevel@tonic-gate } 4930Sstevel@tonic-gate 4940Sstevel@tonic-gate if (error = cpr_write_terminator(vp)) 4950Sstevel@tonic-gate return (error); 4960Sstevel@tonic-gate 4970Sstevel@tonic-gate if (error = cpr_flush_write(vp)) 4980Sstevel@tonic-gate return (error); 4990Sstevel@tonic-gate 5000Sstevel@tonic-gate if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, &cpr_file_bn, vp)) 5010Sstevel@tonic-gate return (error); 5025295Srandyf #endif 5030Sstevel@tonic-gate 5040Sstevel@tonic-gate return (0); 5050Sstevel@tonic-gate } 5060Sstevel@tonic-gate 5070Sstevel@tonic-gate 5085295Srandyf #if defined(__sparc) 5090Sstevel@tonic-gate /* 5105Seg155566 * cpr_xwalk() is called many 100x with a range within kvseg or kvseg_reloc; 5110Sstevel@tonic-gate * a page-count from each range is accumulated at arg->pages. 5120Sstevel@tonic-gate */ 5130Sstevel@tonic-gate static void 5145Seg155566 cpr_xwalk(void *arg, void *base, size_t size) 5150Sstevel@tonic-gate { 5160Sstevel@tonic-gate struct cpr_walkinfo *cwip = arg; 5170Sstevel@tonic-gate 5180Sstevel@tonic-gate cwip->pages += cpr_count_pages(base, size, 5190Sstevel@tonic-gate cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE); 5200Sstevel@tonic-gate cwip->size += size; 5210Sstevel@tonic-gate cwip->ranges++; 5220Sstevel@tonic-gate } 5230Sstevel@tonic-gate 5245Seg155566 /* 5255Seg155566 * cpr_walk() is called many 100x with a range within kvseg or kvseg_reloc; 5265Seg155566 * a page-count from each range is accumulated at arg->pages. 5275Seg155566 */ 5285Seg155566 static void 5295Seg155566 cpr_walk(void *arg, void *base, size_t size) 5305Seg155566 { 5315Seg155566 caddr_t addr = base; 5325Seg155566 caddr_t addr_end = addr + size; 5335Seg155566 5345Seg155566 /* 5355Seg155566 * If we are about to start walking the range of addresses we 5365Seg155566 * carved out of the kernel heap for the large page heap walk 5375Seg155566 * heap_lp_arena to find what segments are actually populated 5385Seg155566 */ 5395Seg155566 if (SEGKMEM_USE_LARGEPAGES && 5405Seg155566 addr == heap_lp_base && addr_end == heap_lp_end && 5415Seg155566 vmem_size(heap_lp_arena, VMEM_ALLOC) < size) { 5425Seg155566 vmem_walk(heap_lp_arena, VMEM_ALLOC, cpr_xwalk, arg); 5435Seg155566 } else { 5445Seg155566 cpr_xwalk(arg, base, size); 5455Seg155566 } 5465Seg155566 } 5475Seg155566 5480Sstevel@tonic-gate 5490Sstevel@tonic-gate /* 5500Sstevel@tonic-gate * faster scan of kvseg using vmem_walk() to visit 5510Sstevel@tonic-gate * allocated ranges. 5520Sstevel@tonic-gate */ 5530Sstevel@tonic-gate pgcnt_t 5540Sstevel@tonic-gate cpr_scan_kvseg(int mapflag, bitfunc_t bitfunc, struct seg *seg) 5550Sstevel@tonic-gate { 5560Sstevel@tonic-gate struct cpr_walkinfo cwinfo; 5570Sstevel@tonic-gate 5580Sstevel@tonic-gate bzero(&cwinfo, sizeof (cwinfo)); 5590Sstevel@tonic-gate cwinfo.mapflag = mapflag; 5600Sstevel@tonic-gate cwinfo.bitfunc = bitfunc; 5610Sstevel@tonic-gate 5620Sstevel@tonic-gate vmem_walk(heap_arena, VMEM_ALLOC, cpr_walk, &cwinfo); 5630Sstevel@tonic-gate 5643446Smrj if (cpr_debug & CPR_DEBUG7) { 5653446Smrj prom_printf("walked %d sub-ranges, total pages %ld\n", 5660Sstevel@tonic-gate cwinfo.ranges, mmu_btop(cwinfo.size)); 5670Sstevel@tonic-gate cpr_show_range(seg->s_base, seg->s_size, 5680Sstevel@tonic-gate mapflag, bitfunc, cwinfo.pages); 5690Sstevel@tonic-gate } 5700Sstevel@tonic-gate 5710Sstevel@tonic-gate return (cwinfo.pages); 5720Sstevel@tonic-gate } 5730Sstevel@tonic-gate 5740Sstevel@tonic-gate 5750Sstevel@tonic-gate /* 5760Sstevel@tonic-gate * cpr_walk_kpm() is called for every used area within the large 5770Sstevel@tonic-gate * segkpm virtual address window. A page-count is accumulated at 5780Sstevel@tonic-gate * arg->pages. 5790Sstevel@tonic-gate */ 5800Sstevel@tonic-gate static void 5810Sstevel@tonic-gate cpr_walk_kpm(void *arg, void *base, size_t size) 5820Sstevel@tonic-gate { 5830Sstevel@tonic-gate struct cpr_walkinfo *cwip = arg; 5840Sstevel@tonic-gate 5850Sstevel@tonic-gate cwip->pages += cpr_count_pages(base, size, 5860Sstevel@tonic-gate cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE); 5870Sstevel@tonic-gate cwip->size += size; 5880Sstevel@tonic-gate cwip->ranges++; 5890Sstevel@tonic-gate } 5900Sstevel@tonic-gate 5910Sstevel@tonic-gate 5920Sstevel@tonic-gate /* 5930Sstevel@tonic-gate * faster scan of segkpm using hat_kpm_walk() to visit only used ranges. 5940Sstevel@tonic-gate */ 5950Sstevel@tonic-gate /*ARGSUSED*/ 5960Sstevel@tonic-gate static pgcnt_t 5970Sstevel@tonic-gate cpr_scan_segkpm(int mapflag, bitfunc_t bitfunc, struct seg *seg) 5980Sstevel@tonic-gate { 5990Sstevel@tonic-gate struct cpr_walkinfo cwinfo; 6000Sstevel@tonic-gate 6010Sstevel@tonic-gate if (kpm_enable == 0) 6020Sstevel@tonic-gate return (0); 6030Sstevel@tonic-gate 6040Sstevel@tonic-gate bzero(&cwinfo, sizeof (cwinfo)); 6050Sstevel@tonic-gate cwinfo.mapflag = mapflag; 6060Sstevel@tonic-gate cwinfo.bitfunc = bitfunc; 6070Sstevel@tonic-gate hat_kpm_walk(cpr_walk_kpm, &cwinfo); 6080Sstevel@tonic-gate 6093446Smrj if (cpr_debug & CPR_DEBUG7) { 6103446Smrj prom_printf("walked %d sub-ranges, total pages %ld\n", 6110Sstevel@tonic-gate cwinfo.ranges, mmu_btop(cwinfo.size)); 6120Sstevel@tonic-gate cpr_show_range(segkpm->s_base, segkpm->s_size, 6130Sstevel@tonic-gate mapflag, bitfunc, cwinfo.pages); 6140Sstevel@tonic-gate } 6150Sstevel@tonic-gate 6160Sstevel@tonic-gate return (cwinfo.pages); 6170Sstevel@tonic-gate } 6180Sstevel@tonic-gate 6190Sstevel@tonic-gate 6200Sstevel@tonic-gate /* 6210Sstevel@tonic-gate * Sparsely filled kernel segments are registered in kseg_table for 6220Sstevel@tonic-gate * easier lookup. See also block comment for cpr_count_seg_pages. 6230Sstevel@tonic-gate */ 6240Sstevel@tonic-gate 6250Sstevel@tonic-gate #define KSEG_SEG_ADDR 0 /* address of struct seg */ 6260Sstevel@tonic-gate #define KSEG_PTR_ADDR 1 /* address of pointer to struct seg */ 6270Sstevel@tonic-gate 6280Sstevel@tonic-gate typedef struct { 6290Sstevel@tonic-gate struct seg **st_seg; /* segment pointer or segment address */ 6300Sstevel@tonic-gate pgcnt_t (*st_fcn)(int, bitfunc_t, struct seg *); /* function to call */ 6310Sstevel@tonic-gate int st_addrtype; /* address type in st_seg */ 6320Sstevel@tonic-gate } ksegtbl_entry_t; 6330Sstevel@tonic-gate 6340Sstevel@tonic-gate ksegtbl_entry_t kseg_table[] = { 6350Sstevel@tonic-gate {(struct seg **)&kvseg, cpr_scan_kvseg, KSEG_SEG_ADDR}, 6360Sstevel@tonic-gate {&segkpm, cpr_scan_segkpm, KSEG_PTR_ADDR}, 6370Sstevel@tonic-gate {NULL, 0, 0} 6380Sstevel@tonic-gate }; 6390Sstevel@tonic-gate 6400Sstevel@tonic-gate 6410Sstevel@tonic-gate /* 6420Sstevel@tonic-gate * Compare seg with each entry in kseg_table; when there is a match 6430Sstevel@tonic-gate * return the entry pointer, otherwise return NULL. 6440Sstevel@tonic-gate */ 6450Sstevel@tonic-gate static ksegtbl_entry_t * 6460Sstevel@tonic-gate cpr_sparse_seg_check(struct seg *seg) 6470Sstevel@tonic-gate { 6480Sstevel@tonic-gate ksegtbl_entry_t *ste = &kseg_table[0]; 6490Sstevel@tonic-gate struct seg *tseg; 6500Sstevel@tonic-gate 6510Sstevel@tonic-gate for (; ste->st_seg; ste++) { 6520Sstevel@tonic-gate tseg = (ste->st_addrtype == KSEG_PTR_ADDR) ? 6535295Srandyf *ste->st_seg : (struct seg *)ste->st_seg; 6545295Srandyf 6550Sstevel@tonic-gate if (seg == tseg) 6560Sstevel@tonic-gate return (ste); 6570Sstevel@tonic-gate } 6580Sstevel@tonic-gate 6590Sstevel@tonic-gate return ((ksegtbl_entry_t *)NULL); 6600Sstevel@tonic-gate } 6610Sstevel@tonic-gate 6620Sstevel@tonic-gate 6630Sstevel@tonic-gate /* 6640Sstevel@tonic-gate * Count pages within each kernel segment; call cpr_sparse_seg_check() 6650Sstevel@tonic-gate * to find out whether a sparsely filled segment needs special 6660Sstevel@tonic-gate * treatment (e.g. kvseg). 6670Sstevel@tonic-gate * Todo: A "SEGOP_CPR" like SEGOP_DUMP should be introduced, the cpr 6680Sstevel@tonic-gate * module shouldn't need to know segment details like if it is 6690Sstevel@tonic-gate * sparsely filled or not (makes kseg_table obsolete). 6700Sstevel@tonic-gate */ 6710Sstevel@tonic-gate pgcnt_t 6720Sstevel@tonic-gate cpr_count_seg_pages(int mapflag, bitfunc_t bitfunc) 6730Sstevel@tonic-gate { 6740Sstevel@tonic-gate struct seg *segp; 6750Sstevel@tonic-gate pgcnt_t pages; 6760Sstevel@tonic-gate ksegtbl_entry_t *ste; 6770Sstevel@tonic-gate 6780Sstevel@tonic-gate pages = 0; 6790Sstevel@tonic-gate for (segp = AS_SEGFIRST(&kas); segp; segp = AS_SEGNEXT(&kas, segp)) { 6800Sstevel@tonic-gate if (ste = cpr_sparse_seg_check(segp)) { 6810Sstevel@tonic-gate pages += (ste->st_fcn)(mapflag, bitfunc, segp); 6820Sstevel@tonic-gate } else { 6830Sstevel@tonic-gate pages += cpr_count_pages(segp->s_base, 6840Sstevel@tonic-gate segp->s_size, mapflag, bitfunc, DBG_SHOWRANGE); 6850Sstevel@tonic-gate } 6860Sstevel@tonic-gate } 6870Sstevel@tonic-gate 6880Sstevel@tonic-gate return (pages); 6890Sstevel@tonic-gate } 6900Sstevel@tonic-gate 6910Sstevel@tonic-gate 6920Sstevel@tonic-gate /* 6930Sstevel@tonic-gate * count kernel pages within kas and any special ranges 6940Sstevel@tonic-gate */ 6950Sstevel@tonic-gate pgcnt_t 6960Sstevel@tonic-gate cpr_count_kpages(int mapflag, bitfunc_t bitfunc) 6970Sstevel@tonic-gate { 6980Sstevel@tonic-gate pgcnt_t kas_cnt; 6990Sstevel@tonic-gate 7000Sstevel@tonic-gate /* 7010Sstevel@tonic-gate * Some pages need to be taken care of differently. 7020Sstevel@tonic-gate * eg: panicbuf pages of sun4m are not in kas but they need 7030Sstevel@tonic-gate * to be saved. On sun4u, the physical pages of panicbuf are 7040Sstevel@tonic-gate * allocated via prom_retain(). 7050Sstevel@tonic-gate */ 7060Sstevel@tonic-gate kas_cnt = i_cpr_count_special_kpages(mapflag, bitfunc); 7070Sstevel@tonic-gate kas_cnt += cpr_count_seg_pages(mapflag, bitfunc); 7080Sstevel@tonic-gate 7093446Smrj CPR_DEBUG(CPR_DEBUG9, "cpr_count_kpages: kas_cnt=%ld\n", kas_cnt); 7103446Smrj CPR_DEBUG(CPR_DEBUG7, "\ncpr_count_kpages: %ld pages, 0x%lx bytes\n", 7115295Srandyf kas_cnt, mmu_ptob(kas_cnt)); 7125295Srandyf 7130Sstevel@tonic-gate return (kas_cnt); 7140Sstevel@tonic-gate } 7150Sstevel@tonic-gate 7160Sstevel@tonic-gate 7170Sstevel@tonic-gate /* 7180Sstevel@tonic-gate * Set a bit corresponding to the arg phys page number; 7190Sstevel@tonic-gate * returns 0 when the ppn is valid and the corresponding 7200Sstevel@tonic-gate * map bit was clear, otherwise returns 1. 7210Sstevel@tonic-gate */ 7220Sstevel@tonic-gate int 7230Sstevel@tonic-gate cpr_setbit(pfn_t ppn, int mapflag) 7240Sstevel@tonic-gate { 7250Sstevel@tonic-gate char *bitmap; 7260Sstevel@tonic-gate cbd_t *dp; 7270Sstevel@tonic-gate pfn_t rel; 7280Sstevel@tonic-gate int clr; 7290Sstevel@tonic-gate 7300Sstevel@tonic-gate for (dp = CPR->c_bmda; dp->cbd_size; dp++) { 7310Sstevel@tonic-gate if (PPN_IN_RANGE(ppn, dp)) { 7320Sstevel@tonic-gate bitmap = DESC_TO_MAP(dp, mapflag); 7330Sstevel@tonic-gate rel = ppn - dp->cbd_spfn; 7340Sstevel@tonic-gate if ((clr = isclr(bitmap, rel)) != 0) 7350Sstevel@tonic-gate setbit(bitmap, rel); 7360Sstevel@tonic-gate return (clr == 0); 7370Sstevel@tonic-gate } 7380Sstevel@tonic-gate } 7390Sstevel@tonic-gate 7400Sstevel@tonic-gate return (1); 7410Sstevel@tonic-gate } 7420Sstevel@tonic-gate 7430Sstevel@tonic-gate 7440Sstevel@tonic-gate /* 7450Sstevel@tonic-gate * Clear a bit corresponding to the arg phys page number. 7460Sstevel@tonic-gate */ 7470Sstevel@tonic-gate int 7480Sstevel@tonic-gate cpr_clrbit(pfn_t ppn, int mapflag) 7490Sstevel@tonic-gate { 7500Sstevel@tonic-gate char *bitmap; 7510Sstevel@tonic-gate cbd_t *dp; 7520Sstevel@tonic-gate pfn_t rel; 7530Sstevel@tonic-gate int set; 7540Sstevel@tonic-gate 7550Sstevel@tonic-gate for (dp = CPR->c_bmda; dp->cbd_size; dp++) { 7560Sstevel@tonic-gate if (PPN_IN_RANGE(ppn, dp)) { 7570Sstevel@tonic-gate bitmap = DESC_TO_MAP(dp, mapflag); 7580Sstevel@tonic-gate rel = ppn - dp->cbd_spfn; 7590Sstevel@tonic-gate if ((set = isset(bitmap, rel)) != 0) 7600Sstevel@tonic-gate clrbit(bitmap, rel); 7610Sstevel@tonic-gate return (set == 0); 7620Sstevel@tonic-gate } 7630Sstevel@tonic-gate } 7640Sstevel@tonic-gate 7650Sstevel@tonic-gate return (1); 7660Sstevel@tonic-gate } 7670Sstevel@tonic-gate 7680Sstevel@tonic-gate 7690Sstevel@tonic-gate /* ARGSUSED */ 7700Sstevel@tonic-gate int 7710Sstevel@tonic-gate cpr_nobit(pfn_t ppn, int mapflag) 7720Sstevel@tonic-gate { 7730Sstevel@tonic-gate return (0); 7740Sstevel@tonic-gate } 7750Sstevel@tonic-gate 7760Sstevel@tonic-gate 7770Sstevel@tonic-gate /* 7780Sstevel@tonic-gate * Lookup a bit corresponding to the arg phys page number. 7790Sstevel@tonic-gate */ 7800Sstevel@tonic-gate int 7810Sstevel@tonic-gate cpr_isset(pfn_t ppn, int mapflag) 7820Sstevel@tonic-gate { 7830Sstevel@tonic-gate char *bitmap; 7840Sstevel@tonic-gate cbd_t *dp; 7850Sstevel@tonic-gate pfn_t rel; 7860Sstevel@tonic-gate 7870Sstevel@tonic-gate for (dp = CPR->c_bmda; dp->cbd_size; dp++) { 7880Sstevel@tonic-gate if (PPN_IN_RANGE(ppn, dp)) { 7890Sstevel@tonic-gate bitmap = DESC_TO_MAP(dp, mapflag); 7900Sstevel@tonic-gate rel = ppn - dp->cbd_spfn; 7910Sstevel@tonic-gate return (isset(bitmap, rel)); 7920Sstevel@tonic-gate } 7930Sstevel@tonic-gate } 7940Sstevel@tonic-gate 7950Sstevel@tonic-gate return (0); 7960Sstevel@tonic-gate } 7970Sstevel@tonic-gate 7980Sstevel@tonic-gate 7990Sstevel@tonic-gate /* 8000Sstevel@tonic-gate * Go thru all pages and pick up any page not caught during the invalidation 8010Sstevel@tonic-gate * stage. This is also used to save pages with cow lock or phys page lock held 8020Sstevel@tonic-gate * (none zero p_lckcnt or p_cowcnt) 8030Sstevel@tonic-gate */ 8040Sstevel@tonic-gate static int 8050Sstevel@tonic-gate cpr_count_upages(int mapflag, bitfunc_t bitfunc) 8060Sstevel@tonic-gate { 8070Sstevel@tonic-gate page_t *pp, *page0; 8080Sstevel@tonic-gate pgcnt_t dcnt = 0, tcnt = 0; 8090Sstevel@tonic-gate pfn_t pfn; 8100Sstevel@tonic-gate 8110Sstevel@tonic-gate page0 = pp = page_first(); 8120Sstevel@tonic-gate 8130Sstevel@tonic-gate do { 8140Sstevel@tonic-gate #if defined(__sparc) 8150Sstevel@tonic-gate extern struct vnode prom_ppages; 8163290Sjohansen if (pp->p_vnode == NULL || PP_ISKAS(pp) || 8170Sstevel@tonic-gate pp->p_vnode == &prom_ppages || 8185295Srandyf PP_ISFREE(pp) && PP_ISAGED(pp)) 8190Sstevel@tonic-gate #else 8203290Sjohansen if (pp->p_vnode == NULL || PP_ISKAS(pp) || 8210Sstevel@tonic-gate PP_ISFREE(pp) && PP_ISAGED(pp)) 8220Sstevel@tonic-gate #endif /* __sparc */ 8230Sstevel@tonic-gate continue; 8240Sstevel@tonic-gate 8250Sstevel@tonic-gate pfn = page_pptonum(pp); 8260Sstevel@tonic-gate if (pf_is_memory(pfn)) { 8270Sstevel@tonic-gate tcnt++; 8280Sstevel@tonic-gate if ((*bitfunc)(pfn, mapflag) == 0) 8290Sstevel@tonic-gate dcnt++; /* dirty count */ 8300Sstevel@tonic-gate } 8310Sstevel@tonic-gate } while ((pp = page_next(pp)) != page0); 8320Sstevel@tonic-gate 8330Sstevel@tonic-gate STAT->cs_upage2statef = dcnt; 8343446Smrj CPR_DEBUG(CPR_DEBUG9, "cpr_count_upages: dirty=%ld total=%ld\n", 8355295Srandyf dcnt, tcnt); 8363446Smrj CPR_DEBUG(CPR_DEBUG7, "cpr_count_upages: %ld pages, 0x%lx bytes\n", 8375295Srandyf dcnt, mmu_ptob(dcnt)); 8385295Srandyf 8390Sstevel@tonic-gate return (dcnt); 8400Sstevel@tonic-gate } 8410Sstevel@tonic-gate 8420Sstevel@tonic-gate 8430Sstevel@tonic-gate /* 8440Sstevel@tonic-gate * try compressing pages based on cflag, 8450Sstevel@tonic-gate * and for DEBUG kernels, verify uncompressed data checksum; 8460Sstevel@tonic-gate * 8470Sstevel@tonic-gate * this routine replaces common code from 8480Sstevel@tonic-gate * i_cpr_compress_and_save() and cpr_compress_and_write() 8490Sstevel@tonic-gate */ 8500Sstevel@tonic-gate char * 8510Sstevel@tonic-gate cpr_compress_pages(cpd_t *dp, pgcnt_t pages, int cflag) 8520Sstevel@tonic-gate { 8530Sstevel@tonic-gate size_t nbytes, clen, len; 8540Sstevel@tonic-gate uint32_t test_sum; 8550Sstevel@tonic-gate char *datap; 8560Sstevel@tonic-gate 8570Sstevel@tonic-gate nbytes = mmu_ptob(pages); 8580Sstevel@tonic-gate 8590Sstevel@tonic-gate /* 8600Sstevel@tonic-gate * set length to the original uncompressed data size; 8610Sstevel@tonic-gate * always init cpd_flag to zero 8620Sstevel@tonic-gate */ 8630Sstevel@tonic-gate dp->cpd_length = nbytes; 8640Sstevel@tonic-gate dp->cpd_flag = 0; 8650Sstevel@tonic-gate 8660Sstevel@tonic-gate #ifdef DEBUG 8670Sstevel@tonic-gate /* 8680Sstevel@tonic-gate * Make a copy of the uncompressed data so we can checksum it. 8690Sstevel@tonic-gate * Compress that copy so the checksum works at the other end 8700Sstevel@tonic-gate */ 8710Sstevel@tonic-gate cprbcopy(CPR->c_mapping_area, cpr_pagecopy, nbytes); 8720Sstevel@tonic-gate dp->cpd_usum = checksum32(cpr_pagecopy, nbytes); 8730Sstevel@tonic-gate dp->cpd_flag |= CPD_USUM; 8740Sstevel@tonic-gate datap = cpr_pagecopy; 8750Sstevel@tonic-gate #else 8760Sstevel@tonic-gate datap = CPR->c_mapping_area; 8770Sstevel@tonic-gate dp->cpd_usum = 0; 8780Sstevel@tonic-gate #endif 8790Sstevel@tonic-gate 8800Sstevel@tonic-gate /* 8810Sstevel@tonic-gate * try compressing the raw data to cpr_pagedata; 8820Sstevel@tonic-gate * if there was a size reduction: record the new length, 8830Sstevel@tonic-gate * flag the compression, and point to the compressed data. 8840Sstevel@tonic-gate */ 8850Sstevel@tonic-gate dp->cpd_csum = 0; 8860Sstevel@tonic-gate if (cflag) { 8870Sstevel@tonic-gate clen = compress(datap, cpr_pagedata, nbytes); 8880Sstevel@tonic-gate if (clen < nbytes) { 8890Sstevel@tonic-gate dp->cpd_flag |= CPD_COMPRESS; 8900Sstevel@tonic-gate dp->cpd_length = clen; 8910Sstevel@tonic-gate datap = cpr_pagedata; 8920Sstevel@tonic-gate #ifdef DEBUG 8930Sstevel@tonic-gate dp->cpd_csum = checksum32(datap, clen); 8940Sstevel@tonic-gate dp->cpd_flag |= CPD_CSUM; 8950Sstevel@tonic-gate 8960Sstevel@tonic-gate /* 8970Sstevel@tonic-gate * decompress the data back to a scratch area 8980Sstevel@tonic-gate * and compare the new checksum with the original 8990Sstevel@tonic-gate * checksum to verify the compression. 9000Sstevel@tonic-gate */ 9010Sstevel@tonic-gate bzero(cpr_pagecopy, sizeof (cpr_pagecopy)); 9020Sstevel@tonic-gate len = decompress(datap, cpr_pagecopy, 9030Sstevel@tonic-gate clen, sizeof (cpr_pagecopy)); 9040Sstevel@tonic-gate test_sum = checksum32(cpr_pagecopy, len); 9050Sstevel@tonic-gate ASSERT(test_sum == dp->cpd_usum); 9060Sstevel@tonic-gate #endif 9070Sstevel@tonic-gate } 9080Sstevel@tonic-gate } 9090Sstevel@tonic-gate 9100Sstevel@tonic-gate return (datap); 9110Sstevel@tonic-gate } 9120Sstevel@tonic-gate 9130Sstevel@tonic-gate 9140Sstevel@tonic-gate /* 9150Sstevel@tonic-gate * 1. Prepare cpr page descriptor and write it to file 9160Sstevel@tonic-gate * 2. Compress page data and write it out 9170Sstevel@tonic-gate */ 9180Sstevel@tonic-gate static int 9190Sstevel@tonic-gate cpr_compress_and_write(vnode_t *vp, uint_t va, pfn_t pfn, pgcnt_t npg) 9200Sstevel@tonic-gate { 9210Sstevel@tonic-gate int error = 0; 9220Sstevel@tonic-gate char *datap; 9230Sstevel@tonic-gate cpd_t cpd; /* cpr page descriptor */ 9240Sstevel@tonic-gate extern void i_cpr_mapin(caddr_t, uint_t, pfn_t); 9250Sstevel@tonic-gate extern void i_cpr_mapout(caddr_t, uint_t); 9260Sstevel@tonic-gate 9270Sstevel@tonic-gate i_cpr_mapin(CPR->c_mapping_area, npg, pfn); 9280Sstevel@tonic-gate 9293446Smrj CPR_DEBUG(CPR_DEBUG3, "mapped-in %ld pages, vaddr 0x%p, pfn 0x%lx\n", 9305295Srandyf npg, CPR->c_mapping_area, pfn); 9310Sstevel@tonic-gate 9320Sstevel@tonic-gate /* 9330Sstevel@tonic-gate * Fill cpr page descriptor. 9340Sstevel@tonic-gate */ 9350Sstevel@tonic-gate cpd.cpd_magic = (uint_t)CPR_PAGE_MAGIC; 9360Sstevel@tonic-gate cpd.cpd_pfn = pfn; 9370Sstevel@tonic-gate cpd.cpd_pages = npg; 9380Sstevel@tonic-gate 9390Sstevel@tonic-gate STAT->cs_dumped_statefsz += mmu_ptob(npg); 9400Sstevel@tonic-gate 9410Sstevel@tonic-gate datap = cpr_compress_pages(&cpd, npg, CPR->c_flags & C_COMPRESSING); 9420Sstevel@tonic-gate 9430Sstevel@tonic-gate /* Write cpr page descriptor */ 9440Sstevel@tonic-gate error = cpr_write(vp, (caddr_t)&cpd, sizeof (cpd_t)); 9450Sstevel@tonic-gate 9460Sstevel@tonic-gate /* Write compressed page data */ 9470Sstevel@tonic-gate error = cpr_write(vp, (caddr_t)datap, cpd.cpd_length); 9480Sstevel@tonic-gate 9490Sstevel@tonic-gate /* 9500Sstevel@tonic-gate * Unmap the pages for tlb and vac flushing 9510Sstevel@tonic-gate */ 9520Sstevel@tonic-gate i_cpr_mapout(CPR->c_mapping_area, npg); 9530Sstevel@tonic-gate 9540Sstevel@tonic-gate if (error) { 9553446Smrj CPR_DEBUG(CPR_DEBUG1, 9563446Smrj "cpr_compress_and_write: vp 0x%p va 0x%x ", vp, va); 9573446Smrj CPR_DEBUG(CPR_DEBUG1, "pfn 0x%lx blk %d err %d\n", 9583446Smrj pfn, cpr_file_bn, error); 9590Sstevel@tonic-gate } else { 9600Sstevel@tonic-gate cpr_regular_pgs_dumped += npg; 9610Sstevel@tonic-gate } 9620Sstevel@tonic-gate 9630Sstevel@tonic-gate return (error); 9640Sstevel@tonic-gate } 9650Sstevel@tonic-gate 9660Sstevel@tonic-gate 9670Sstevel@tonic-gate int 9680Sstevel@tonic-gate cpr_write(vnode_t *vp, caddr_t buffer, size_t size) 9690Sstevel@tonic-gate { 9700Sstevel@tonic-gate caddr_t fromp = buffer; 9710Sstevel@tonic-gate size_t bytes, wbytes; 9720Sstevel@tonic-gate int error; 9730Sstevel@tonic-gate 9740Sstevel@tonic-gate if (cpr_dev_space == 0) { 9750Sstevel@tonic-gate if (vp->v_type == VBLK) { 9760Sstevel@tonic-gate cpr_dev_space = cpr_get_devsize(vp->v_rdev); 9770Sstevel@tonic-gate ASSERT(cpr_dev_space); 9780Sstevel@tonic-gate } else 9790Sstevel@tonic-gate cpr_dev_space = 1; /* not used in this case */ 9800Sstevel@tonic-gate } 9810Sstevel@tonic-gate 9820Sstevel@tonic-gate /* 9830Sstevel@tonic-gate * break the write into multiple part if request is large, 9840Sstevel@tonic-gate * calculate count up to buf page boundary, then write it out. 9850Sstevel@tonic-gate * repeat until done. 9860Sstevel@tonic-gate */ 9870Sstevel@tonic-gate while (size) { 9880Sstevel@tonic-gate bytes = MIN(size, cpr_buf_end - cpr_wptr); 9890Sstevel@tonic-gate cprbcopy(fromp, cpr_wptr, bytes); 9900Sstevel@tonic-gate cpr_wptr += bytes; 9910Sstevel@tonic-gate fromp += bytes; 9920Sstevel@tonic-gate size -= bytes; 9930Sstevel@tonic-gate if (cpr_wptr < cpr_buf_end) 9940Sstevel@tonic-gate return (0); /* buffer not full yet */ 9950Sstevel@tonic-gate ASSERT(cpr_wptr == cpr_buf_end); 9960Sstevel@tonic-gate 9970Sstevel@tonic-gate wbytes = dbtob(cpr_file_bn + cpr_buf_blocks); 9980Sstevel@tonic-gate if (vp->v_type == VBLK) { 9990Sstevel@tonic-gate if (wbytes > cpr_dev_space) 10000Sstevel@tonic-gate return (ENOSPC); 10010Sstevel@tonic-gate } else { 10020Sstevel@tonic-gate if (wbytes > VTOI(vp)->i_size) 10030Sstevel@tonic-gate return (ENOSPC); 10040Sstevel@tonic-gate } 10050Sstevel@tonic-gate 10063446Smrj CPR_DEBUG(CPR_DEBUG3, 10073446Smrj "cpr_write: frmp=%p wptr=%p cnt=%lx...", 10083446Smrj fromp, cpr_wptr, bytes); 10090Sstevel@tonic-gate /* 10100Sstevel@tonic-gate * cross check, this should not happen! 10110Sstevel@tonic-gate */ 10120Sstevel@tonic-gate if (cpr_disk_writes_ok == 0) { 10133446Smrj prom_printf("cpr_write: disk write too early!\n"); 10140Sstevel@tonic-gate return (EINVAL); 10150Sstevel@tonic-gate } 10160Sstevel@tonic-gate 10170Sstevel@tonic-gate do_polled_io = 1; 1018*5331Samw error = VOP_DUMP(vp, cpr_buf, cpr_file_bn, cpr_buf_blocks, 1019*5331Samw NULL); 10200Sstevel@tonic-gate do_polled_io = 0; 10213446Smrj CPR_DEBUG(CPR_DEBUG3, "done\n"); 10220Sstevel@tonic-gate 10230Sstevel@tonic-gate STAT->cs_real_statefsz += cpr_buf_size; 10240Sstevel@tonic-gate 10250Sstevel@tonic-gate if (error) { 10260Sstevel@tonic-gate cpr_err(CE_WARN, "cpr_write error %d", error); 10270Sstevel@tonic-gate return (error); 10280Sstevel@tonic-gate } 10290Sstevel@tonic-gate cpr_file_bn += cpr_buf_blocks; /* Increment block count */ 10300Sstevel@tonic-gate cpr_wptr = cpr_buf; /* back to top of buffer */ 10310Sstevel@tonic-gate } 10320Sstevel@tonic-gate return (0); 10330Sstevel@tonic-gate } 10340Sstevel@tonic-gate 10350Sstevel@tonic-gate 10360Sstevel@tonic-gate int 10370Sstevel@tonic-gate cpr_flush_write(vnode_t *vp) 10380Sstevel@tonic-gate { 10390Sstevel@tonic-gate int nblk; 10400Sstevel@tonic-gate int error; 10410Sstevel@tonic-gate 10420Sstevel@tonic-gate /* 10430Sstevel@tonic-gate * Calculate remaining blocks in buffer, rounded up to nearest 10440Sstevel@tonic-gate * disk block 10450Sstevel@tonic-gate */ 10460Sstevel@tonic-gate nblk = btod(cpr_wptr - cpr_buf); 10470Sstevel@tonic-gate 10480Sstevel@tonic-gate do_polled_io = 1; 1049*5331Samw error = VOP_DUMP(vp, (caddr_t)cpr_buf, cpr_file_bn, nblk, NULL); 10500Sstevel@tonic-gate do_polled_io = 0; 10510Sstevel@tonic-gate 10520Sstevel@tonic-gate cpr_file_bn += nblk; 10530Sstevel@tonic-gate if (error) 10543446Smrj CPR_DEBUG(CPR_DEBUG2, "cpr_flush_write: error (%d)\n", 10553446Smrj error); 10560Sstevel@tonic-gate return (error); 10570Sstevel@tonic-gate } 10580Sstevel@tonic-gate 10590Sstevel@tonic-gate void 10600Sstevel@tonic-gate cpr_clear_bitmaps(void) 10610Sstevel@tonic-gate { 10620Sstevel@tonic-gate cbd_t *dp; 10630Sstevel@tonic-gate 10640Sstevel@tonic-gate for (dp = CPR->c_bmda; dp->cbd_size; dp++) { 10650Sstevel@tonic-gate bzero((void *)dp->cbd_reg_bitmap, 10660Sstevel@tonic-gate (size_t)dp->cbd_size * 2); 10670Sstevel@tonic-gate } 10683446Smrj CPR_DEBUG(CPR_DEBUG7, "\ncleared reg and vlt bitmaps\n"); 10690Sstevel@tonic-gate } 10700Sstevel@tonic-gate 10710Sstevel@tonic-gate int 10720Sstevel@tonic-gate cpr_contig_pages(vnode_t *vp, int flag) 10730Sstevel@tonic-gate { 10740Sstevel@tonic-gate int chunks = 0, error = 0; 10750Sstevel@tonic-gate pgcnt_t i, j, totbit; 10760Sstevel@tonic-gate pfn_t spfn; 10770Sstevel@tonic-gate cbd_t *dp; 10780Sstevel@tonic-gate uint_t spin_cnt = 0; 10790Sstevel@tonic-gate extern int i_cpr_compress_and_save(); 10800Sstevel@tonic-gate 10810Sstevel@tonic-gate for (dp = CPR->c_bmda; dp->cbd_size; dp++) { 10820Sstevel@tonic-gate spfn = dp->cbd_spfn; 10830Sstevel@tonic-gate totbit = BTOb(dp->cbd_size); 10840Sstevel@tonic-gate i = 0; /* Beginning of bitmap */ 10850Sstevel@tonic-gate j = 0; 10860Sstevel@tonic-gate while (i < totbit) { 10870Sstevel@tonic-gate while ((j < CPR_MAXCONTIG) && ((j + i) < totbit)) { 10880Sstevel@tonic-gate if (isset((char *)dp->cbd_reg_bitmap, j+i)) 10890Sstevel@tonic-gate j++; 10900Sstevel@tonic-gate else /* not contiguous anymore */ 10910Sstevel@tonic-gate break; 10920Sstevel@tonic-gate } 10930Sstevel@tonic-gate 10940Sstevel@tonic-gate if (j) { 10950Sstevel@tonic-gate chunks++; 10960Sstevel@tonic-gate if (flag == SAVE_TO_STORAGE) { 10970Sstevel@tonic-gate error = i_cpr_compress_and_save( 10980Sstevel@tonic-gate chunks, spfn + i, j); 10990Sstevel@tonic-gate if (error) 11000Sstevel@tonic-gate return (error); 11010Sstevel@tonic-gate } else if (flag == WRITE_TO_STATEFILE) { 11020Sstevel@tonic-gate error = cpr_compress_and_write(vp, 0, 11030Sstevel@tonic-gate spfn + i, j); 11040Sstevel@tonic-gate if (error) 11050Sstevel@tonic-gate return (error); 11060Sstevel@tonic-gate else { 11070Sstevel@tonic-gate spin_cnt++; 11080Sstevel@tonic-gate if ((spin_cnt & 0x5F) == 1) 11090Sstevel@tonic-gate cpr_spinning_bar(); 11100Sstevel@tonic-gate } 11110Sstevel@tonic-gate } 11120Sstevel@tonic-gate } 11130Sstevel@tonic-gate 11140Sstevel@tonic-gate i += j; 11150Sstevel@tonic-gate if (j != CPR_MAXCONTIG) { 11160Sstevel@tonic-gate /* Stopped on a non-tagged page */ 11170Sstevel@tonic-gate i++; 11180Sstevel@tonic-gate } 11190Sstevel@tonic-gate 11200Sstevel@tonic-gate j = 0; 11210Sstevel@tonic-gate } 11220Sstevel@tonic-gate } 11230Sstevel@tonic-gate 11240Sstevel@tonic-gate if (flag == STORAGE_DESC_ALLOC) 11250Sstevel@tonic-gate return (chunks); 11260Sstevel@tonic-gate else 11270Sstevel@tonic-gate return (0); 11280Sstevel@tonic-gate } 11290Sstevel@tonic-gate 11300Sstevel@tonic-gate 11310Sstevel@tonic-gate void 11320Sstevel@tonic-gate cpr_show_range(caddr_t vaddr, size_t size, 11330Sstevel@tonic-gate int mapflag, bitfunc_t bitfunc, pgcnt_t count) 11340Sstevel@tonic-gate { 11350Sstevel@tonic-gate char *action, *bname; 11360Sstevel@tonic-gate 11370Sstevel@tonic-gate bname = (mapflag == REGULAR_BITMAP) ? "regular" : "volatile"; 11380Sstevel@tonic-gate if (bitfunc == cpr_setbit) 11390Sstevel@tonic-gate action = "tag"; 11400Sstevel@tonic-gate else if (bitfunc == cpr_clrbit) 11410Sstevel@tonic-gate action = "untag"; 11420Sstevel@tonic-gate else 11430Sstevel@tonic-gate action = "none"; 11443446Smrj prom_printf("range (0x%p, 0x%p), %s bitmap, %s %ld\n", 11450Sstevel@tonic-gate vaddr, vaddr + size, bname, action, count); 11460Sstevel@tonic-gate } 11470Sstevel@tonic-gate 11480Sstevel@tonic-gate 11490Sstevel@tonic-gate pgcnt_t 11500Sstevel@tonic-gate cpr_count_pages(caddr_t sva, size_t size, 11510Sstevel@tonic-gate int mapflag, bitfunc_t bitfunc, int showrange) 11520Sstevel@tonic-gate { 11530Sstevel@tonic-gate caddr_t va, eva; 11540Sstevel@tonic-gate pfn_t pfn; 11550Sstevel@tonic-gate pgcnt_t count = 0; 11560Sstevel@tonic-gate 11570Sstevel@tonic-gate eva = sva + PAGE_ROUNDUP(size); 11580Sstevel@tonic-gate for (va = sva; va < eva; va += MMU_PAGESIZE) { 11590Sstevel@tonic-gate pfn = va_to_pfn(va); 11600Sstevel@tonic-gate if (pfn != PFN_INVALID && pf_is_memory(pfn)) { 11610Sstevel@tonic-gate if ((*bitfunc)(pfn, mapflag) == 0) 11620Sstevel@tonic-gate count++; 11630Sstevel@tonic-gate } 11640Sstevel@tonic-gate } 11650Sstevel@tonic-gate 11663446Smrj if ((cpr_debug & CPR_DEBUG7) && showrange == DBG_SHOWRANGE) 11670Sstevel@tonic-gate cpr_show_range(sva, size, mapflag, bitfunc, count); 11680Sstevel@tonic-gate 11690Sstevel@tonic-gate return (count); 11700Sstevel@tonic-gate } 11710Sstevel@tonic-gate 11720Sstevel@tonic-gate 11730Sstevel@tonic-gate pgcnt_t 11740Sstevel@tonic-gate cpr_count_volatile_pages(int mapflag, bitfunc_t bitfunc) 11750Sstevel@tonic-gate { 11760Sstevel@tonic-gate pgcnt_t count = 0; 11770Sstevel@tonic-gate 11780Sstevel@tonic-gate if (cpr_buf) { 11790Sstevel@tonic-gate count += cpr_count_pages(cpr_buf, cpr_buf_size, 11800Sstevel@tonic-gate mapflag, bitfunc, DBG_SHOWRANGE); 11810Sstevel@tonic-gate } 11820Sstevel@tonic-gate if (cpr_pagedata) { 11830Sstevel@tonic-gate count += cpr_count_pages(cpr_pagedata, cpr_pagedata_size, 11840Sstevel@tonic-gate mapflag, bitfunc, DBG_SHOWRANGE); 11850Sstevel@tonic-gate } 11860Sstevel@tonic-gate count += i_cpr_count_storage_pages(mapflag, bitfunc); 11870Sstevel@tonic-gate 11883446Smrj CPR_DEBUG(CPR_DEBUG7, "cpr_count_vpages: %ld pages, 0x%lx bytes\n", 11893446Smrj count, mmu_ptob(count)); 11900Sstevel@tonic-gate return (count); 11910Sstevel@tonic-gate } 11920Sstevel@tonic-gate 11930Sstevel@tonic-gate 11940Sstevel@tonic-gate static int 11950Sstevel@tonic-gate cpr_dump_regular_pages(vnode_t *vp) 11960Sstevel@tonic-gate { 11970Sstevel@tonic-gate int error; 11980Sstevel@tonic-gate 11990Sstevel@tonic-gate cpr_regular_pgs_dumped = 0; 12000Sstevel@tonic-gate error = cpr_contig_pages(vp, WRITE_TO_STATEFILE); 12010Sstevel@tonic-gate if (!error) 12023446Smrj CPR_DEBUG(CPR_DEBUG7, "cpr_dump_regular_pages() done.\n"); 12030Sstevel@tonic-gate return (error); 12040Sstevel@tonic-gate } 12055295Srandyf #endif 1206