/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 270Sstevel@tonic-gate 280Sstevel@tonic-gate /* 290Sstevel@tonic-gate * Platform specific implementation code 300Sstevel@tonic-gate */ 310Sstevel@tonic-gate 320Sstevel@tonic-gate #define SUNDDI_IMPL 330Sstevel@tonic-gate 340Sstevel@tonic-gate #include <sys/types.h> 350Sstevel@tonic-gate #include <sys/promif.h> 360Sstevel@tonic-gate #include <sys/prom_isa.h> 370Sstevel@tonic-gate #include <sys/prom_plat.h> 380Sstevel@tonic-gate #include <sys/mmu.h> 390Sstevel@tonic-gate #include <vm/hat_sfmmu.h> 400Sstevel@tonic-gate #include <sys/iommu.h> 410Sstevel@tonic-gate #include <sys/scb.h> 420Sstevel@tonic-gate #include <sys/cpuvar.h> 430Sstevel@tonic-gate #include <sys/intreg.h> 440Sstevel@tonic-gate #include <sys/pte.h> 450Sstevel@tonic-gate #include <vm/hat.h> 460Sstevel@tonic-gate #include <vm/page.h> 470Sstevel@tonic-gate #include <vm/as.h> 480Sstevel@tonic-gate #include <sys/cpr.h> 490Sstevel@tonic-gate #include <sys/kmem.h> 500Sstevel@tonic-gate #include <sys/clock.h> 510Sstevel@tonic-gate #include <sys/kmem.h> 520Sstevel@tonic-gate #include <sys/panic.h> 530Sstevel@tonic-gate #include <vm/seg_kmem.h> 540Sstevel@tonic-gate #include <sys/cpu_module.h> 550Sstevel@tonic-gate #include <sys/callb.h> 560Sstevel@tonic-gate #include <sys/machsystm.h> 570Sstevel@tonic-gate #include <sys/vmsystm.h> 580Sstevel@tonic-gate #include <sys/systm.h> 590Sstevel@tonic-gate #include <sys/archsystm.h> 600Sstevel@tonic-gate #include <sys/stack.h> 610Sstevel@tonic-gate #include <sys/fs/ufs_fs.h> 620Sstevel@tonic-gate #include <sys/memlist.h> 630Sstevel@tonic-gate #include <sys/bootconf.h> 640Sstevel@tonic-gate #include <sys/thread.h> 655Seg155566 #include <vm/vm_dep.h> 660Sstevel@tonic-gate 670Sstevel@tonic-gate extern void cpr_clear_bitmaps(void); 685295Srandyf extern int cpr_setbit(pfn_t ppn, int mapflag); 695295Srandyf extern int cpr_clrbit(pfn_t ppn, int mapflag); 705295Srandyf extern 
pgcnt_t cpr_scan_kvseg(int mapflag, bitfunc_t bitfunc, struct seg *seg); 715295Srandyf extern pgcnt_t cpr_count_seg_pages(int mapflag, bitfunc_t bitfunc); 720Sstevel@tonic-gate extern void dtlb_wr_entry(uint_t, tte_t *, uint64_t *); 730Sstevel@tonic-gate extern void itlb_wr_entry(uint_t, tte_t *, uint64_t *); 740Sstevel@tonic-gate 750Sstevel@tonic-gate static int i_cpr_storage_desc_alloc(csd_t **, pgcnt_t *, csd_t **, int); 760Sstevel@tonic-gate static void i_cpr_storage_desc_init(csd_t *, pgcnt_t, csd_t *); 770Sstevel@tonic-gate static caddr_t i_cpr_storage_data_alloc(pgcnt_t, pgcnt_t *, int); 780Sstevel@tonic-gate static int cpr_dump_sensitive(vnode_t *, csd_t *); 790Sstevel@tonic-gate static void i_cpr_clear_entries(uint64_t, uint64_t); 800Sstevel@tonic-gate static void i_cpr_xcall(xcfunc_t); 810Sstevel@tonic-gate 820Sstevel@tonic-gate void i_cpr_storage_free(void); 830Sstevel@tonic-gate 840Sstevel@tonic-gate extern void *i_cpr_data_page; 850Sstevel@tonic-gate extern int cpr_test_mode; 860Sstevel@tonic-gate extern int cpr_nbitmaps; 870Sstevel@tonic-gate extern char cpr_default_path[]; 880Sstevel@tonic-gate extern caddr_t textva, datava; 890Sstevel@tonic-gate 900Sstevel@tonic-gate static struct cpr_map_info cpr_prom_retain[CPR_PROM_RETAIN_CNT]; 910Sstevel@tonic-gate caddr_t cpr_vaddr = NULL; 920Sstevel@tonic-gate 930Sstevel@tonic-gate static uint_t sensitive_pages_saved; 940Sstevel@tonic-gate static uint_t sensitive_size_saved; 950Sstevel@tonic-gate 960Sstevel@tonic-gate caddr_t i_cpr_storage_data_base; 970Sstevel@tonic-gate caddr_t i_cpr_storage_data_end; 980Sstevel@tonic-gate csd_t *i_cpr_storage_desc_base; 990Sstevel@tonic-gate csd_t *i_cpr_storage_desc_end; /* one byte beyond last used descp */ 1000Sstevel@tonic-gate csd_t *i_cpr_storage_desc_last_used; /* last used descriptor */ 1010Sstevel@tonic-gate caddr_t sensitive_write_ptr; /* position for next storage write */ 1020Sstevel@tonic-gate 1030Sstevel@tonic-gate size_t i_cpr_sensitive_bytes_dumped; 
1040Sstevel@tonic-gate pgcnt_t i_cpr_sensitive_pgs_dumped; 1050Sstevel@tonic-gate pgcnt_t i_cpr_storage_data_sz; /* in pages */ 1060Sstevel@tonic-gate pgcnt_t i_cpr_storage_desc_pgcnt; /* in pages */ 1070Sstevel@tonic-gate 1080Sstevel@tonic-gate ushort_t cpr_mach_type = CPR_MACHTYPE_4U; 1090Sstevel@tonic-gate static csu_md_t m_info; 1100Sstevel@tonic-gate 1110Sstevel@tonic-gate 1120Sstevel@tonic-gate #define MAX_STORAGE_RETRY 3 1130Sstevel@tonic-gate #define MAX_STORAGE_ALLOC_RETRY 3 1140Sstevel@tonic-gate #define INITIAL_ALLOC_PCNT 40 /* starting allocation percentage */ 1150Sstevel@tonic-gate #define INTEGRAL 100 /* to get 1% precision */ 1160Sstevel@tonic-gate 1170Sstevel@tonic-gate #define EXTRA_RATE 2 /* add EXTRA_RATE% extra space */ 1180Sstevel@tonic-gate #define EXTRA_DESCS 10 1190Sstevel@tonic-gate 1200Sstevel@tonic-gate #define CPR_NO_STORAGE_DESC 1 1210Sstevel@tonic-gate #define CPR_NO_STORAGE_DATA 2 1220Sstevel@tonic-gate 1230Sstevel@tonic-gate #define CIF_SPLICE 0 1240Sstevel@tonic-gate #define CIF_UNLINK 1 1250Sstevel@tonic-gate 1260Sstevel@tonic-gate 1270Sstevel@tonic-gate /* 1280Sstevel@tonic-gate * CPR miscellaneous support routines 1290Sstevel@tonic-gate */ 1300Sstevel@tonic-gate #define cpr_open(path, mode, vpp) (vn_open(path, UIO_SYSSPACE, \ 1310Sstevel@tonic-gate mode, 0600, vpp, CRCREAT, 0)) 1320Sstevel@tonic-gate #define cpr_rdwr(rw, vp, basep, cnt) (vn_rdwr(rw, vp, (caddr_t)(basep), \ 1330Sstevel@tonic-gate cnt, 0LL, UIO_SYSSPACE, 0, (rlim64_t)MAXOFF_T, CRED(), \ 1340Sstevel@tonic-gate (ssize_t *)NULL)) 1350Sstevel@tonic-gate 1360Sstevel@tonic-gate /* 1370Sstevel@tonic-gate * definitions for saving/restoring prom pages 1380Sstevel@tonic-gate */ 1390Sstevel@tonic-gate static void *ppage_buf; 1400Sstevel@tonic-gate static pgcnt_t ppage_count; 1410Sstevel@tonic-gate static pfn_t *pphys_list; 1420Sstevel@tonic-gate static size_t pphys_list_size; 1430Sstevel@tonic-gate 1440Sstevel@tonic-gate typedef void (*tlb_rw_t)(uint_t, tte_t *, uint64_t *); 
1450Sstevel@tonic-gate typedef void (*tlb_filter_t)(int, tte_t *, uint64_t, void *); 1460Sstevel@tonic-gate 1470Sstevel@tonic-gate /* 1480Sstevel@tonic-gate * private struct for tlb handling 1490Sstevel@tonic-gate */ 1500Sstevel@tonic-gate struct cpr_trans_info { 1510Sstevel@tonic-gate sutlb_t *dst; 1520Sstevel@tonic-gate sutlb_t *tail; 1530Sstevel@tonic-gate tlb_rw_t reader; 1540Sstevel@tonic-gate tlb_rw_t writer; 1550Sstevel@tonic-gate tlb_filter_t filter; 1560Sstevel@tonic-gate int index; 1570Sstevel@tonic-gate uint64_t skip; /* assumes TLB <= 64 locked entries */ 1580Sstevel@tonic-gate }; 1590Sstevel@tonic-gate typedef struct cpr_trans_info cti_t; 1600Sstevel@tonic-gate 1610Sstevel@tonic-gate 1620Sstevel@tonic-gate /* 1630Sstevel@tonic-gate * special handling for tlb info 1640Sstevel@tonic-gate */ 1650Sstevel@tonic-gate #define WITHIN_OFW(va) \ 1660Sstevel@tonic-gate (((va) > (uint64_t)OFW_START_ADDR) && ((va) < (uint64_t)OFW_END_ADDR)) 1670Sstevel@tonic-gate 1680Sstevel@tonic-gate #define WITHIN_NUCLEUS(va, base) \ 1690Sstevel@tonic-gate (((va) >= (base)) && \ 1700Sstevel@tonic-gate (((va) + MMU_PAGESIZE) <= ((base) + MMU_PAGESIZE4M))) 1710Sstevel@tonic-gate 1720Sstevel@tonic-gate #define IS_BIGKTSB(va) \ 1730Sstevel@tonic-gate (enable_bigktsb && \ 1740Sstevel@tonic-gate ((va) >= (uint64_t)ktsb_base) && \ 1750Sstevel@tonic-gate ((va) < (uint64_t)(ktsb_base + ktsb_sz))) 1760Sstevel@tonic-gate 1770Sstevel@tonic-gate 1780Sstevel@tonic-gate /* 1790Sstevel@tonic-gate * WARNING: 1800Sstevel@tonic-gate * the text from this file is linked to follow cpr_resume_setup.o; 1810Sstevel@tonic-gate * only add text between here and i_cpr_end_jumpback when it needs 1820Sstevel@tonic-gate * to be called during resume before we switch back to the kernel 1830Sstevel@tonic-gate * trap table. all the text in this range must fit within a page. 
1840Sstevel@tonic-gate */ 1850Sstevel@tonic-gate 1860Sstevel@tonic-gate 1870Sstevel@tonic-gate /* 1880Sstevel@tonic-gate * each time a machine is reset, the prom uses an inconsistent set of phys 1890Sstevel@tonic-gate * pages and the cif cookie may differ as well. so prior to restoring the 1900Sstevel@tonic-gate * original prom, we have to use to use the new/tmp prom's translations 1910Sstevel@tonic-gate * when requesting prom services. 1920Sstevel@tonic-gate * 1930Sstevel@tonic-gate * cif_handler starts out as the original prom cookie, and that gets used 1940Sstevel@tonic-gate * by client_handler() to jump into the prom. here we splice-in a wrapper 1950Sstevel@tonic-gate * routine by writing cif_handler; client_handler() will now jump to the 1960Sstevel@tonic-gate * wrapper which switches the %tba to the new/tmp prom's trap table then 1970Sstevel@tonic-gate * jumps to the new cookie. 1980Sstevel@tonic-gate */ 1990Sstevel@tonic-gate void 2000Sstevel@tonic-gate i_cpr_cif_setup(int action) 2010Sstevel@tonic-gate { 2020Sstevel@tonic-gate extern void *i_cpr_orig_cif, *cif_handler; 2030Sstevel@tonic-gate extern int i_cpr_cif_wrapper(void *); 2040Sstevel@tonic-gate 2050Sstevel@tonic-gate /* 2060Sstevel@tonic-gate * save the original cookie and change the current cookie to the 2070Sstevel@tonic-gate * wrapper routine. later we just restore the original cookie. 
2080Sstevel@tonic-gate */ 2090Sstevel@tonic-gate if (action == CIF_SPLICE) { 2100Sstevel@tonic-gate i_cpr_orig_cif = cif_handler; 2110Sstevel@tonic-gate cif_handler = (void *)i_cpr_cif_wrapper; 2120Sstevel@tonic-gate } else if (action == CIF_UNLINK) 2130Sstevel@tonic-gate cif_handler = i_cpr_orig_cif; 2140Sstevel@tonic-gate } 2150Sstevel@tonic-gate 2160Sstevel@tonic-gate 2170Sstevel@tonic-gate /* 2180Sstevel@tonic-gate * launch slave cpus into kernel text, pause them, 2190Sstevel@tonic-gate * and restore the original prom pages 2200Sstevel@tonic-gate */ 2210Sstevel@tonic-gate void 2220Sstevel@tonic-gate i_cpr_mp_setup(void) 2230Sstevel@tonic-gate { 2240Sstevel@tonic-gate extern void restart_other_cpu(int); 2250Sstevel@tonic-gate cpu_t *cp; 2260Sstevel@tonic-gate 2275Seg155566 uint64_t kctx = kcontextreg; 2285Seg155566 2295Seg155566 /* 2305Seg155566 * Do not allow setting page size codes in MMU primary context 2315Seg155566 * register while using cif wrapper. This is needed to work 232*5331Samw * around OBP incorrect handling of this MMU register. 2335Seg155566 */ 2345Seg155566 kcontextreg = 0; 2355Seg155566 2360Sstevel@tonic-gate /* 2370Sstevel@tonic-gate * reset cpu_ready_set so x_calls work properly 2380Sstevel@tonic-gate */ 2390Sstevel@tonic-gate CPUSET_ZERO(cpu_ready_set); 2400Sstevel@tonic-gate CPUSET_ADD(cpu_ready_set, getprocessorid()); 2410Sstevel@tonic-gate 2420Sstevel@tonic-gate /* 2430Sstevel@tonic-gate * setup cif to use the cookie from the new/tmp prom 2440Sstevel@tonic-gate * and setup tmp handling for calling prom services. 2450Sstevel@tonic-gate */ 2460Sstevel@tonic-gate i_cpr_cif_setup(CIF_SPLICE); 2470Sstevel@tonic-gate 2480Sstevel@tonic-gate /* 2490Sstevel@tonic-gate * at this point, only the nucleus and a few cpr pages are 2500Sstevel@tonic-gate * mapped in. once we switch to the kernel trap table, 2510Sstevel@tonic-gate * we can access the rest of kernel space. 
2520Sstevel@tonic-gate */ 2530Sstevel@tonic-gate prom_set_traptable(&trap_table); 2540Sstevel@tonic-gate 2550Sstevel@tonic-gate if (ncpus > 1) { 2560Sstevel@tonic-gate sfmmu_init_tsbs(); 2570Sstevel@tonic-gate 2580Sstevel@tonic-gate mutex_enter(&cpu_lock); 2590Sstevel@tonic-gate /* 2600Sstevel@tonic-gate * All of the slave cpus are not ready at this time, 2610Sstevel@tonic-gate * yet the cpu structures have various cpu_flags set; 2620Sstevel@tonic-gate * clear cpu_flags and mutex_ready. 2630Sstevel@tonic-gate * Since we are coming up from a CPU suspend, the slave cpus 2640Sstevel@tonic-gate * are frozen. 2650Sstevel@tonic-gate */ 2660Sstevel@tonic-gate for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next) { 2670Sstevel@tonic-gate cp->cpu_flags = CPU_FROZEN; 2680Sstevel@tonic-gate cp->cpu_m.mutex_ready = 0; 2690Sstevel@tonic-gate } 2700Sstevel@tonic-gate 2710Sstevel@tonic-gate for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next) 2720Sstevel@tonic-gate restart_other_cpu(cp->cpu_id); 2730Sstevel@tonic-gate 2740Sstevel@tonic-gate pause_cpus(NULL); 2750Sstevel@tonic-gate mutex_exit(&cpu_lock); 2760Sstevel@tonic-gate 2770Sstevel@tonic-gate i_cpr_xcall(i_cpr_clear_entries); 2780Sstevel@tonic-gate } else 2790Sstevel@tonic-gate i_cpr_clear_entries(0, 0); 2800Sstevel@tonic-gate 2810Sstevel@tonic-gate /* 2820Sstevel@tonic-gate * now unlink the cif wrapper; WARNING: do not call any 2830Sstevel@tonic-gate * prom_xxx() routines until after prom pages are restored. 
2840Sstevel@tonic-gate */ 2850Sstevel@tonic-gate i_cpr_cif_setup(CIF_UNLINK); 2860Sstevel@tonic-gate 2873982Svb70745 (void) i_cpr_prom_pages(CPR_PROM_RESTORE); 2885Seg155566 2895Seg155566 /* allow setting page size codes in MMU primary context register */ 2905Seg155566 kcontextreg = kctx; 2910Sstevel@tonic-gate } 2920Sstevel@tonic-gate 2930Sstevel@tonic-gate 2940Sstevel@tonic-gate /* 2950Sstevel@tonic-gate * end marker for jumpback page; 2960Sstevel@tonic-gate * this symbol is used to check the size of i_cpr_resume_setup() 2970Sstevel@tonic-gate * and the above text. For simplicity, the Makefile needs to 2980Sstevel@tonic-gate * link i_cpr_resume_setup.o and cpr_impl.o consecutively. 2990Sstevel@tonic-gate */ 3000Sstevel@tonic-gate void 3010Sstevel@tonic-gate i_cpr_end_jumpback(void) 3020Sstevel@tonic-gate { 3030Sstevel@tonic-gate } 3040Sstevel@tonic-gate 3050Sstevel@tonic-gate 3060Sstevel@tonic-gate /* 3070Sstevel@tonic-gate * scan tlb entries with reader; when valid entries are found, 3080Sstevel@tonic-gate * the filter routine will selectively save/clear them 3090Sstevel@tonic-gate */ 3100Sstevel@tonic-gate static void 3110Sstevel@tonic-gate i_cpr_scan_tlb(cti_t *ctip) 3120Sstevel@tonic-gate { 3130Sstevel@tonic-gate uint64_t va_tag; 3140Sstevel@tonic-gate int tlb_index; 3150Sstevel@tonic-gate tte_t tte; 3160Sstevel@tonic-gate 3170Sstevel@tonic-gate for (tlb_index = ctip->index; tlb_index >= 0; tlb_index--) { 3180Sstevel@tonic-gate (*ctip->reader)((uint_t)tlb_index, &tte, &va_tag); 3190Sstevel@tonic-gate if (va_tag && TTE_IS_VALID(&tte)) 3200Sstevel@tonic-gate (*ctip->filter)(tlb_index, &tte, va_tag, ctip); 3210Sstevel@tonic-gate } 3220Sstevel@tonic-gate } 3230Sstevel@tonic-gate 3240Sstevel@tonic-gate 3250Sstevel@tonic-gate /* 3260Sstevel@tonic-gate * filter for locked tlb entries that reference the text/data nucleus 3270Sstevel@tonic-gate * and any bigktsb's; these will be reinstalled by cprboot on all cpus 3280Sstevel@tonic-gate */ 3290Sstevel@tonic-gate /* 
ARGSUSED */ 3300Sstevel@tonic-gate static void 3310Sstevel@tonic-gate i_cpr_lnb(int index, tte_t *ttep, uint64_t va_tag, void *ctrans) 3320Sstevel@tonic-gate { 3330Sstevel@tonic-gate cti_t *ctip; 3340Sstevel@tonic-gate 3350Sstevel@tonic-gate /* 3360Sstevel@tonic-gate * record tlb data at ctip->dst; the target tlb index starts 3370Sstevel@tonic-gate * at the highest tlb offset and moves towards 0. the prom 3380Sstevel@tonic-gate * reserves both dtlb and itlb index 0. any selected entry 3390Sstevel@tonic-gate * also gets marked to prevent being flushed during resume 3400Sstevel@tonic-gate */ 3410Sstevel@tonic-gate if (TTE_IS_LOCKED(ttep) && (va_tag == (uint64_t)textva || 3420Sstevel@tonic-gate va_tag == (uint64_t)datava || IS_BIGKTSB(va_tag))) { 3430Sstevel@tonic-gate ctip = ctrans; 3440Sstevel@tonic-gate while ((1 << ctip->index) & ctip->skip) 3450Sstevel@tonic-gate ctip->index--; 3460Sstevel@tonic-gate ASSERT(ctip->index > 0); 3470Sstevel@tonic-gate ASSERT(ctip->dst < ctip->tail); 3480Sstevel@tonic-gate ctip->dst->tte.ll = ttep->ll; 3490Sstevel@tonic-gate ctip->dst->va_tag = va_tag; 3500Sstevel@tonic-gate ctip->dst->index = ctip->index--; 3510Sstevel@tonic-gate ctip->dst->tmp = 0; 3520Sstevel@tonic-gate ctip->dst++; 3530Sstevel@tonic-gate } 3540Sstevel@tonic-gate } 3550Sstevel@tonic-gate 3560Sstevel@tonic-gate 3570Sstevel@tonic-gate /* 3580Sstevel@tonic-gate * some tlb entries are stale, filter for unlocked entries 3590Sstevel@tonic-gate * within the prom virt range and clear them 3600Sstevel@tonic-gate */ 3610Sstevel@tonic-gate static void 3620Sstevel@tonic-gate i_cpr_ufw(int index, tte_t *ttep, uint64_t va_tag, void *ctrans) 3630Sstevel@tonic-gate { 3640Sstevel@tonic-gate sutlb_t clr; 3650Sstevel@tonic-gate cti_t *ctip; 3660Sstevel@tonic-gate 3670Sstevel@tonic-gate if (!TTE_IS_LOCKED(ttep) && WITHIN_OFW(va_tag)) { 3680Sstevel@tonic-gate ctip = ctrans; 3690Sstevel@tonic-gate bzero(&clr, sizeof (clr)); 3700Sstevel@tonic-gate (*ctip->writer)((uint_t)index, &clr.tte, 
&clr.va_tag); 3710Sstevel@tonic-gate } 3720Sstevel@tonic-gate } 3730Sstevel@tonic-gate 3740Sstevel@tonic-gate 3750Sstevel@tonic-gate /* 3760Sstevel@tonic-gate * some of the entries installed by cprboot are needed only on a 3770Sstevel@tonic-gate * short-term basis and need to be flushed to avoid clogging the tlbs. 3780Sstevel@tonic-gate * scan the dtte/itte arrays for items marked as temporary and clear 3790Sstevel@tonic-gate * dtlb/itlb entries using wrfunc. 3800Sstevel@tonic-gate */ 3810Sstevel@tonic-gate static void 3820Sstevel@tonic-gate i_cpr_clear_tmp(sutlb_t *listp, int max, tlb_rw_t wrfunc) 3830Sstevel@tonic-gate { 3840Sstevel@tonic-gate sutlb_t clr, *tail; 3850Sstevel@tonic-gate 3860Sstevel@tonic-gate bzero(&clr, sizeof (clr)); 3870Sstevel@tonic-gate for (tail = listp + max; listp < tail && listp->va_tag; listp++) { 3880Sstevel@tonic-gate if (listp->tmp) 3890Sstevel@tonic-gate (*wrfunc)((uint_t)listp->index, &clr.tte, &clr.va_tag); 3900Sstevel@tonic-gate } 3910Sstevel@tonic-gate } 3920Sstevel@tonic-gate 3930Sstevel@tonic-gate 3940Sstevel@tonic-gate /* ARGSUSED */ 3950Sstevel@tonic-gate static void 3960Sstevel@tonic-gate i_cpr_clear_entries(uint64_t arg1, uint64_t arg2) 3970Sstevel@tonic-gate { 3980Sstevel@tonic-gate extern void demap_all(void); 3990Sstevel@tonic-gate cti_t cti; 4000Sstevel@tonic-gate 4010Sstevel@tonic-gate i_cpr_clear_tmp(m_info.dtte, CPR_MAX_TLB, dtlb_wr_entry); 4020Sstevel@tonic-gate i_cpr_clear_tmp(m_info.itte, CPR_MAX_TLB, itlb_wr_entry); 4030Sstevel@tonic-gate 4040Sstevel@tonic-gate /* 4050Sstevel@tonic-gate * for newer cpus that implement DEMAP_ALL_TYPE, demap_all is 4060Sstevel@tonic-gate * a second label for vtag_flushall. the call is made using 4070Sstevel@tonic-gate * vtag_flushall() instead of demap_all() due to runtime and 4080Sstevel@tonic-gate * krtld results with both older and newer cpu modules. 
4090Sstevel@tonic-gate */ 4100Sstevel@tonic-gate if (&demap_all != 0) { 4110Sstevel@tonic-gate vtag_flushall(); 4120Sstevel@tonic-gate return; 4130Sstevel@tonic-gate } 4140Sstevel@tonic-gate 4150Sstevel@tonic-gate /* 4160Sstevel@tonic-gate * for older V9 cpus, scan tlbs and clear stale entries 4170Sstevel@tonic-gate */ 4180Sstevel@tonic-gate bzero(&cti, sizeof (cti)); 4190Sstevel@tonic-gate cti.filter = i_cpr_ufw; 4200Sstevel@tonic-gate 4210Sstevel@tonic-gate cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1; 4220Sstevel@tonic-gate cti.reader = dtlb_rd_entry; 4230Sstevel@tonic-gate cti.writer = dtlb_wr_entry; 4240Sstevel@tonic-gate i_cpr_scan_tlb(&cti); 4250Sstevel@tonic-gate 4260Sstevel@tonic-gate cti.index = cpunodes[CPU->cpu_id].itlb_size - 1; 4270Sstevel@tonic-gate cti.reader = itlb_rd_entry; 4280Sstevel@tonic-gate cti.writer = itlb_wr_entry; 4290Sstevel@tonic-gate i_cpr_scan_tlb(&cti); 4300Sstevel@tonic-gate } 4310Sstevel@tonic-gate 4320Sstevel@tonic-gate 4330Sstevel@tonic-gate /* 4340Sstevel@tonic-gate * craft tlb info for tmp use during resume; this data gets used by 4350Sstevel@tonic-gate * cprboot to install tlb entries. we also mark each struct as tmp 4360Sstevel@tonic-gate * so those tlb entries will get flushed after switching to the kernel 4370Sstevel@tonic-gate * trap table. no data needs to be recorded for vaddr when it falls 4380Sstevel@tonic-gate * within the nucleus since we've already recorded nucleus ttes and 4390Sstevel@tonic-gate * a 8K tte would conflict with a 4MB tte. eg: the cpr module 4400Sstevel@tonic-gate * text/data may have been loaded into the text/data nucleus. 
4410Sstevel@tonic-gate */ 4420Sstevel@tonic-gate static void 4430Sstevel@tonic-gate i_cpr_make_tte(cti_t *ctip, void *vaddr, caddr_t nbase) 4440Sstevel@tonic-gate { 4450Sstevel@tonic-gate pfn_t ppn; 4460Sstevel@tonic-gate uint_t rw; 4470Sstevel@tonic-gate 4480Sstevel@tonic-gate if (WITHIN_NUCLEUS((caddr_t)vaddr, nbase)) 4490Sstevel@tonic-gate return; 4500Sstevel@tonic-gate 4510Sstevel@tonic-gate while ((1 << ctip->index) & ctip->skip) 4520Sstevel@tonic-gate ctip->index--; 4530Sstevel@tonic-gate ASSERT(ctip->index > 0); 4540Sstevel@tonic-gate ASSERT(ctip->dst < ctip->tail); 4550Sstevel@tonic-gate 4560Sstevel@tonic-gate /* 4570Sstevel@tonic-gate * without any global service available to lookup 4580Sstevel@tonic-gate * a tte by vaddr, we craft our own here: 4590Sstevel@tonic-gate */ 4600Sstevel@tonic-gate ppn = va_to_pfn(vaddr); 4610Sstevel@tonic-gate rw = (nbase == datava) ? TTE_HWWR_INT : 0; 4620Sstevel@tonic-gate ctip->dst->tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(ppn); 4630Sstevel@tonic-gate ctip->dst->tte.tte_intlo = TTE_PFN_INTLO(ppn) | TTE_LCK_INT | 4640Sstevel@tonic-gate TTE_CP_INT | TTE_PRIV_INT | rw; 4650Sstevel@tonic-gate ctip->dst->va_tag = ((uintptr_t)vaddr & MMU_PAGEMASK); 4660Sstevel@tonic-gate ctip->dst->index = ctip->index--; 4670Sstevel@tonic-gate ctip->dst->tmp = 1; 4680Sstevel@tonic-gate ctip->dst++; 4690Sstevel@tonic-gate } 4700Sstevel@tonic-gate 4710Sstevel@tonic-gate 4720Sstevel@tonic-gate static void 4730Sstevel@tonic-gate i_cpr_xcall(xcfunc_t func) 4740Sstevel@tonic-gate { 4750Sstevel@tonic-gate uint_t pil, reset_pil; 4760Sstevel@tonic-gate 4770Sstevel@tonic-gate pil = getpil(); 4780Sstevel@tonic-gate if (pil < XCALL_PIL) 4790Sstevel@tonic-gate reset_pil = 0; 4800Sstevel@tonic-gate else { 4810Sstevel@tonic-gate reset_pil = 1; 4820Sstevel@tonic-gate setpil(XCALL_PIL - 1); 4830Sstevel@tonic-gate } 4840Sstevel@tonic-gate xc_some(cpu_ready_set, func, 0, 0); 4850Sstevel@tonic-gate if (reset_pil) 4860Sstevel@tonic-gate setpil(pil); 
4870Sstevel@tonic-gate } 4880Sstevel@tonic-gate 4890Sstevel@tonic-gate 4900Sstevel@tonic-gate /* 4910Sstevel@tonic-gate * restart paused slave cpus 4920Sstevel@tonic-gate */ 4930Sstevel@tonic-gate void 4940Sstevel@tonic-gate i_cpr_machdep_setup(void) 4950Sstevel@tonic-gate { 4960Sstevel@tonic-gate if (ncpus > 1) { 4973446Smrj CPR_DEBUG(CPR_DEBUG1, "MP restarted...\n"); 4980Sstevel@tonic-gate mutex_enter(&cpu_lock); 4990Sstevel@tonic-gate start_cpus(); 5000Sstevel@tonic-gate mutex_exit(&cpu_lock); 5010Sstevel@tonic-gate } 5020Sstevel@tonic-gate } 5030Sstevel@tonic-gate 5040Sstevel@tonic-gate 5050Sstevel@tonic-gate /* 5060Sstevel@tonic-gate * Stop all interrupt activities in the system 5070Sstevel@tonic-gate */ 5080Sstevel@tonic-gate void 5090Sstevel@tonic-gate i_cpr_stop_intr(void) 5100Sstevel@tonic-gate { 5110Sstevel@tonic-gate (void) spl7(); 5120Sstevel@tonic-gate } 5130Sstevel@tonic-gate 5140Sstevel@tonic-gate /* 5150Sstevel@tonic-gate * Set machine up to take interrupts 5160Sstevel@tonic-gate */ 5170Sstevel@tonic-gate void 5180Sstevel@tonic-gate i_cpr_enable_intr(void) 5190Sstevel@tonic-gate { 5200Sstevel@tonic-gate (void) spl0(); 5210Sstevel@tonic-gate } 5220Sstevel@tonic-gate 5230Sstevel@tonic-gate 5240Sstevel@tonic-gate /* 5250Sstevel@tonic-gate * record cpu nodes and ids 5260Sstevel@tonic-gate */ 5270Sstevel@tonic-gate static void 5280Sstevel@tonic-gate i_cpr_save_cpu_info(void) 5290Sstevel@tonic-gate { 5300Sstevel@tonic-gate struct sun4u_cpu_info *scip; 5310Sstevel@tonic-gate cpu_t *cp; 5320Sstevel@tonic-gate 5330Sstevel@tonic-gate scip = m_info.sci; 5340Sstevel@tonic-gate cp = CPU; 5350Sstevel@tonic-gate do { 5360Sstevel@tonic-gate ASSERT(scip < &m_info.sci[NCPU]); 5370Sstevel@tonic-gate scip->cpu_id = cp->cpu_id; 5380Sstevel@tonic-gate scip->node = cpunodes[cp->cpu_id].nodeid; 5390Sstevel@tonic-gate scip++; 5400Sstevel@tonic-gate } while ((cp = cp->cpu_next) != CPU); 5410Sstevel@tonic-gate } 5420Sstevel@tonic-gate 5430Sstevel@tonic-gate 
5440Sstevel@tonic-gate /* 5450Sstevel@tonic-gate * Write necessary machine dependent information to cpr state file, 5460Sstevel@tonic-gate * eg. sun4u mmu ctx secondary for the current running process (cpr) ... 5470Sstevel@tonic-gate */ 5480Sstevel@tonic-gate int 5490Sstevel@tonic-gate i_cpr_write_machdep(vnode_t *vp) 5500Sstevel@tonic-gate { 5510Sstevel@tonic-gate extern uint_t getpstate(), getwstate(); 5520Sstevel@tonic-gate extern uint_t i_cpr_tstack_size; 5530Sstevel@tonic-gate const char ustr[] = ": unix-tte 2drop false ;"; 5540Sstevel@tonic-gate uintptr_t tinfo; 5550Sstevel@tonic-gate label_t *ltp; 5560Sstevel@tonic-gate cmd_t cmach; 5570Sstevel@tonic-gate char *fmt; 5580Sstevel@tonic-gate int rc; 5590Sstevel@tonic-gate 5600Sstevel@tonic-gate /* 5610Sstevel@tonic-gate * ustr[] is used as temporary forth words during 5620Sstevel@tonic-gate * slave startup sequence, see sfmmu_mp_startup() 5630Sstevel@tonic-gate */ 5640Sstevel@tonic-gate 5650Sstevel@tonic-gate cmach.md_magic = (uint_t)CPR_MACHDEP_MAGIC; 5660Sstevel@tonic-gate cmach.md_size = sizeof (m_info) + sizeof (ustr); 5670Sstevel@tonic-gate 5680Sstevel@tonic-gate if (rc = cpr_write(vp, (caddr_t)&cmach, sizeof (cmach))) { 5690Sstevel@tonic-gate cpr_err(CE_WARN, "Failed to write descriptor."); 5700Sstevel@tonic-gate return (rc); 5710Sstevel@tonic-gate } 5720Sstevel@tonic-gate 5730Sstevel@tonic-gate /* 5740Sstevel@tonic-gate * m_info is now cleared in i_cpr_dump_setup() 5750Sstevel@tonic-gate */ 5760Sstevel@tonic-gate m_info.ksb = (uint32_t)STACK_BIAS; 5770Sstevel@tonic-gate m_info.kpstate = (uint16_t)getpstate(); 5780Sstevel@tonic-gate m_info.kwstate = (uint16_t)getwstate(); 5793446Smrj CPR_DEBUG(CPR_DEBUG1, "stack bias 0x%x, pstate 0x%x, wstate 0x%x\n", 5803446Smrj m_info.ksb, m_info.kpstate, m_info.kwstate); 5810Sstevel@tonic-gate 5820Sstevel@tonic-gate ltp = &ttolwp(curthread)->lwp_qsav; 5830Sstevel@tonic-gate m_info.qsav_pc = (cpr_ext)ltp->val[0]; 5840Sstevel@tonic-gate m_info.qsav_sp = 
(cpr_ext)ltp->val[1]; 5850Sstevel@tonic-gate 5860Sstevel@tonic-gate /* 5870Sstevel@tonic-gate * Set secondary context to INVALID_CONTEXT to force the HAT 5880Sstevel@tonic-gate * to re-setup the MMU registers and locked TTEs it needs for 5890Sstevel@tonic-gate * TLB miss handling. 5900Sstevel@tonic-gate */ 5910Sstevel@tonic-gate m_info.mmu_ctx_sec = INVALID_CONTEXT; 5925Seg155566 m_info.mmu_ctx_pri = KCONTEXT; 5930Sstevel@tonic-gate 5940Sstevel@tonic-gate tinfo = (uintptr_t)curthread; 5950Sstevel@tonic-gate m_info.thrp = (cpr_ptr)tinfo; 5960Sstevel@tonic-gate 5970Sstevel@tonic-gate tinfo = (uintptr_t)i_cpr_resume_setup; 5980Sstevel@tonic-gate m_info.func = (cpr_ptr)tinfo; 5990Sstevel@tonic-gate 6000Sstevel@tonic-gate /* 6010Sstevel@tonic-gate * i_cpr_data_page is comprised of a 4K stack area and a few 6020Sstevel@tonic-gate * trailing data symbols; the page is shared by the prom and 6030Sstevel@tonic-gate * kernel during resume. the stack size is recorded here 6040Sstevel@tonic-gate * and used by cprboot to set %sp 6050Sstevel@tonic-gate */ 6060Sstevel@tonic-gate tinfo = (uintptr_t)&i_cpr_data_page; 6070Sstevel@tonic-gate m_info.tmp_stack = (cpr_ptr)tinfo; 6080Sstevel@tonic-gate m_info.tmp_stacksize = i_cpr_tstack_size; 6090Sstevel@tonic-gate 6100Sstevel@tonic-gate m_info.test_mode = cpr_test_mode; 6110Sstevel@tonic-gate 6120Sstevel@tonic-gate i_cpr_save_cpu_info(); 6130Sstevel@tonic-gate 6140Sstevel@tonic-gate if (rc = cpr_write(vp, (caddr_t)&m_info, sizeof (m_info))) { 6150Sstevel@tonic-gate cpr_err(CE_WARN, "Failed to write machdep info."); 6160Sstevel@tonic-gate return (rc); 6170Sstevel@tonic-gate } 6180Sstevel@tonic-gate 6190Sstevel@tonic-gate fmt = "error writing %s forth info"; 6200Sstevel@tonic-gate if (rc = cpr_write(vp, (caddr_t)ustr, sizeof (ustr))) 6210Sstevel@tonic-gate cpr_err(CE_WARN, fmt, "unix-tte"); 6220Sstevel@tonic-gate 6230Sstevel@tonic-gate return (rc); 6240Sstevel@tonic-gate } 6250Sstevel@tonic-gate 6260Sstevel@tonic-gate 
6270Sstevel@tonic-gate /* 6280Sstevel@tonic-gate * Save miscellaneous information which needs to be written to the 6290Sstevel@tonic-gate * state file. This information is required to re-initialize 6300Sstevel@tonic-gate * kernel/prom handshaking. 6310Sstevel@tonic-gate */ 6320Sstevel@tonic-gate void 6330Sstevel@tonic-gate i_cpr_save_machdep_info(void) 6340Sstevel@tonic-gate { 6353446Smrj CPR_DEBUG(CPR_DEBUG5, "jumpback size = 0x%lx\n", 6360Sstevel@tonic-gate (uintptr_t)&i_cpr_end_jumpback - 6373446Smrj (uintptr_t)i_cpr_resume_setup); 6380Sstevel@tonic-gate 6390Sstevel@tonic-gate /* 6400Sstevel@tonic-gate * Verify the jumpback code all falls in one page. 6410Sstevel@tonic-gate */ 6420Sstevel@tonic-gate if (((uintptr_t)&i_cpr_end_jumpback & MMU_PAGEMASK) != 6430Sstevel@tonic-gate ((uintptr_t)i_cpr_resume_setup & MMU_PAGEMASK)) 6440Sstevel@tonic-gate cpr_err(CE_PANIC, "jumpback code exceeds one page."); 6450Sstevel@tonic-gate } 6460Sstevel@tonic-gate 6470Sstevel@tonic-gate 6480Sstevel@tonic-gate /* 6490Sstevel@tonic-gate * cpu0 should contain bootcpu info 6500Sstevel@tonic-gate */ 6510Sstevel@tonic-gate cpu_t * 6520Sstevel@tonic-gate i_cpr_bootcpu(void) 6530Sstevel@tonic-gate { 6540Sstevel@tonic-gate return (&cpu0); 6550Sstevel@tonic-gate } 6560Sstevel@tonic-gate 6575295Srandyf processorid_t 6585295Srandyf i_cpr_bootcpuid(void) 6595295Srandyf { 6605295Srandyf return (0); 6615295Srandyf } 6620Sstevel@tonic-gate 6630Sstevel@tonic-gate /* 6640Sstevel@tonic-gate * Return the virtual address of the mapping area 6650Sstevel@tonic-gate */ 6660Sstevel@tonic-gate caddr_t 6670Sstevel@tonic-gate i_cpr_map_setup(void) 6680Sstevel@tonic-gate { 6690Sstevel@tonic-gate /* 6700Sstevel@tonic-gate * Allocate a virtual memory range spanned by an hmeblk. 6710Sstevel@tonic-gate * This would be 8 hments or 64k bytes. Starting VA 6720Sstevel@tonic-gate * must be 64k (8-page) aligned. 
6730Sstevel@tonic-gate */ 6740Sstevel@tonic-gate cpr_vaddr = vmem_xalloc(heap_arena, 6750Sstevel@tonic-gate mmu_ptob(NHMENTS), mmu_ptob(NHMENTS), 6760Sstevel@tonic-gate 0, 0, NULL, NULL, VM_NOSLEEP); 6770Sstevel@tonic-gate return (cpr_vaddr); 6780Sstevel@tonic-gate } 6790Sstevel@tonic-gate 6800Sstevel@tonic-gate /* 6810Sstevel@tonic-gate * create tmp locked tlb entries for a group of phys pages; 6820Sstevel@tonic-gate * 6830Sstevel@tonic-gate * i_cpr_mapin/i_cpr_mapout should always be called in pairs, 6840Sstevel@tonic-gate * otherwise would fill up a tlb with locked entries 6850Sstevel@tonic-gate */ 6860Sstevel@tonic-gate void 6870Sstevel@tonic-gate i_cpr_mapin(caddr_t vaddr, uint_t pages, pfn_t ppn) 6880Sstevel@tonic-gate { 6890Sstevel@tonic-gate tte_t tte; 6900Sstevel@tonic-gate extern pfn_t curthreadpfn; 6910Sstevel@tonic-gate extern int curthreadremapped; 6920Sstevel@tonic-gate 6930Sstevel@tonic-gate curthreadremapped = (ppn <= curthreadpfn && curthreadpfn < ppn + pages); 6940Sstevel@tonic-gate 6950Sstevel@tonic-gate for (; pages--; ppn++, vaddr += MMU_PAGESIZE) { 6960Sstevel@tonic-gate tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(ppn); 6970Sstevel@tonic-gate tte.tte_intlo = TTE_PFN_INTLO(ppn) | TTE_LCK_INT | 6980Sstevel@tonic-gate TTE_CP_INT | TTE_PRIV_INT | TTE_HWWR_INT; 6992241Shuah sfmmu_dtlb_ld_kva(vaddr, &tte); 7000Sstevel@tonic-gate } 7010Sstevel@tonic-gate } 7020Sstevel@tonic-gate 7030Sstevel@tonic-gate void 7040Sstevel@tonic-gate i_cpr_mapout(caddr_t vaddr, uint_t pages) 7050Sstevel@tonic-gate { 7060Sstevel@tonic-gate extern int curthreadremapped; 7070Sstevel@tonic-gate 7080Sstevel@tonic-gate if (curthreadremapped && vaddr <= (caddr_t)curthread && 7090Sstevel@tonic-gate (caddr_t)curthread < vaddr + pages * MMU_PAGESIZE) 7100Sstevel@tonic-gate curthreadremapped = 0; 7110Sstevel@tonic-gate 7120Sstevel@tonic-gate for (; pages--; vaddr += MMU_PAGESIZE) 7132241Shuah vtag_flushpage(vaddr, (uint64_t)ksfmmup); 7140Sstevel@tonic-gate } 7150Sstevel@tonic-gate 
7160Sstevel@tonic-gate /* 7170Sstevel@tonic-gate * We're done using the mapping area; release virtual space 7180Sstevel@tonic-gate */ 7190Sstevel@tonic-gate void 7200Sstevel@tonic-gate i_cpr_map_destroy(void) 7210Sstevel@tonic-gate { 7220Sstevel@tonic-gate vmem_free(heap_arena, cpr_vaddr, mmu_ptob(NHMENTS)); 7230Sstevel@tonic-gate cpr_vaddr = NULL; 7240Sstevel@tonic-gate } 7250Sstevel@tonic-gate 7260Sstevel@tonic-gate /* ARGSUSED */ 7270Sstevel@tonic-gate void 7280Sstevel@tonic-gate i_cpr_handle_xc(int flag) 7290Sstevel@tonic-gate { 7300Sstevel@tonic-gate } 7310Sstevel@tonic-gate 7320Sstevel@tonic-gate 7330Sstevel@tonic-gate /* 7340Sstevel@tonic-gate * This function takes care of pages which are not in kas or need to be 7350Sstevel@tonic-gate * taken care of in a special way. For example, panicbuf pages are not 7360Sstevel@tonic-gate * in kas and their pages are allocated via prom_retain(). 7370Sstevel@tonic-gate */ 7380Sstevel@tonic-gate pgcnt_t 7390Sstevel@tonic-gate i_cpr_count_special_kpages(int mapflag, bitfunc_t bitfunc) 7400Sstevel@tonic-gate { 7410Sstevel@tonic-gate struct cpr_map_info *pri, *tail; 7420Sstevel@tonic-gate pgcnt_t pages, total = 0; 7430Sstevel@tonic-gate pfn_t pfn; 7440Sstevel@tonic-gate 7450Sstevel@tonic-gate /* 7460Sstevel@tonic-gate * Save information about prom retained panicbuf pages 7470Sstevel@tonic-gate */ 7480Sstevel@tonic-gate if (bitfunc == cpr_setbit) { 7490Sstevel@tonic-gate pri = &cpr_prom_retain[CPR_PANICBUF]; 7500Sstevel@tonic-gate pri->virt = (cpr_ptr)panicbuf; 7510Sstevel@tonic-gate pri->phys = va_to_pa(panicbuf); 7520Sstevel@tonic-gate pri->size = sizeof (panicbuf); 7530Sstevel@tonic-gate } 7540Sstevel@tonic-gate 7550Sstevel@tonic-gate /* 7560Sstevel@tonic-gate * Go through the prom_retain array to tag those pages. 
7570Sstevel@tonic-gate */ 7580Sstevel@tonic-gate tail = &cpr_prom_retain[CPR_PROM_RETAIN_CNT]; 7590Sstevel@tonic-gate for (pri = cpr_prom_retain; pri < tail; pri++) { 7600Sstevel@tonic-gate pages = mmu_btopr(pri->size); 7610Sstevel@tonic-gate for (pfn = ADDR_TO_PN(pri->phys); pages--; pfn++) { 7620Sstevel@tonic-gate if (pf_is_memory(pfn)) { 7630Sstevel@tonic-gate if (bitfunc == cpr_setbit) { 7640Sstevel@tonic-gate if ((*bitfunc)(pfn, mapflag) == 0) 7650Sstevel@tonic-gate total++; 7660Sstevel@tonic-gate } else 7670Sstevel@tonic-gate total++; 7680Sstevel@tonic-gate } 7690Sstevel@tonic-gate } 7700Sstevel@tonic-gate } 7710Sstevel@tonic-gate 7720Sstevel@tonic-gate return (total); 7730Sstevel@tonic-gate } 7740Sstevel@tonic-gate 7750Sstevel@tonic-gate 7760Sstevel@tonic-gate /* 7770Sstevel@tonic-gate * Free up memory-related resources here. We start by freeing buffers 7780Sstevel@tonic-gate * allocated during suspend initialization. Also, free up the mapping 7790Sstevel@tonic-gate * resources allocated in cpr_init(). 7800Sstevel@tonic-gate */ 7810Sstevel@tonic-gate void 7820Sstevel@tonic-gate i_cpr_free_memory_resources(void) 7830Sstevel@tonic-gate { 7840Sstevel@tonic-gate (void) i_cpr_prom_pages(CPR_PROM_FREE); 7850Sstevel@tonic-gate i_cpr_map_destroy(); 7860Sstevel@tonic-gate i_cpr_storage_free(); 7870Sstevel@tonic-gate } 7880Sstevel@tonic-gate 7890Sstevel@tonic-gate 7900Sstevel@tonic-gate /* 7910Sstevel@tonic-gate * Derived from cpr_write_statefile(). 7920Sstevel@tonic-gate * Save the sensitive pages to the storage area and do bookkeeping 7930Sstevel@tonic-gate * using the sensitive descriptors. Each descriptor will contain no more 7940Sstevel@tonic-gate * than CPR_MAXCONTIG amount of contiguous pages to match the max amount 7950Sstevel@tonic-gate * of pages that statefile gets written to disk at each write. 7960Sstevel@tonic-gate * XXX The CPR_MAXCONTIG can be changed to the size of the compression 7970Sstevel@tonic-gate * scratch area. 
 */
static int
i_cpr_save_to_storage(void)
{
	/* reset bookkeeping for a fresh save pass over the storage area */
	sensitive_size_saved = 0;
	sensitive_pages_saved = 0;
	sensitive_write_ptr = i_cpr_storage_data_base;
	return (cpr_contig_pages(NULL, SAVE_TO_STORAGE));
}


/*
 * This routine allocates space to save the sensitive kernel pages,
 * i.e. kernel data nucleus, kvalloc and kvseg segments.
 * It's assumed that those segments are the only areas that can be
 * contaminated by memory allocations during statefile dumping.
 * The space allocated here contains:
 *	A list of descriptors describing the saved sensitive pages.
 *	The storage area for saving the compressed sensitive kernel pages.
 * Since storage pages are allocated from segkmem, they need to be
 * excluded when saving.
 *
 * Returns 0 on success, otherwise an errno value.
 */
int
i_cpr_save_sensitive_kpages(void)
{
	static const char pages_fmt[] = "\n%s %s allocs\n"
	    "	spages %ld, vpages %ld, diff %ld\n";
	int retry_cnt;
	int error = 0;
	pgcnt_t pages, spages, vpages;
	caddr_t addr;
	char *str;

	/*
	 * Tag sensitive kpages. Allocate space for storage descriptors
	 * and storage data area based on the resulting bitmaps.
	 * Note: The storage space will be part of the sensitive
	 * segment, so we need to tag kpages here before the storage
	 * is actually allocated just so their space won't be accounted
	 * for. They will not be part of the statefile although those
	 * pages will be claimed by cprboot.
	 */
	cpr_clear_bitmaps();

	spages = i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_setbit);
	vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);
	pages = spages - vpages;

	str = "i_cpr_save_sensitive_kpages:";
	CPR_DEBUG(CPR_DEBUG7, pages_fmt, "before", str, spages, vpages, pages);

	/*
	 * Allocate space to save the clean sensitive kpages
	 */
	for (retry_cnt = 0; retry_cnt < MAX_STORAGE_ALLOC_RETRY; retry_cnt++) {
		/*
		 * Alloc on first pass or realloc if we are retrying because
		 * of insufficient storage for sensitive pages
		 */
		if (retry_cnt == 0 || error == ENOMEM) {
			if (i_cpr_storage_data_base) {
				kmem_free(i_cpr_storage_data_base,
				    mmu_ptob(i_cpr_storage_data_sz));
				i_cpr_storage_data_base = NULL;
				i_cpr_storage_data_sz = 0;
			}
			addr = i_cpr_storage_data_alloc(pages,
			    &i_cpr_storage_data_sz, retry_cnt);
			if (addr == NULL) {
				CPR_DEBUG(CPR_DEBUG7,
				    "\n%s can't allocate data storage space!\n",
				    str);
				return (ENOMEM);
			}
			i_cpr_storage_data_base = addr;
			i_cpr_storage_data_end =
			    addr + mmu_ptob(i_cpr_storage_data_sz);
		}

		/*
		 * Allocate on first pass, only realloc if retry is because of
		 * insufficient descriptors, but reset contents on each pass
		 * (desc_alloc resets contents as well)
		 */
		if (retry_cnt == 0 || error == -1) {
			error = i_cpr_storage_desc_alloc(
			    &i_cpr_storage_desc_base, &i_cpr_storage_desc_pgcnt,
			    &i_cpr_storage_desc_end, retry_cnt);
			if (error != 0)
				return (error);
		} else {
			i_cpr_storage_desc_init(i_cpr_storage_desc_base,
			    i_cpr_storage_desc_pgcnt, i_cpr_storage_desc_end);
		}

		/*
		 * We are ready to save the sensitive kpages to storage.
		 * We cannot trust what's tagged in the bitmaps anymore
		 * after storage allocations. Clear up the bitmaps and
		 * retag the sensitive kpages again. The storage pages
		 * should be untagged.
		 */
		cpr_clear_bitmaps();

		spages =
		    i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_setbit);
		vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);

		CPR_DEBUG(CPR_DEBUG7, pages_fmt, "after ", str,
		    spages, vpages, spages - vpages);

		/*
		 * i_cpr_save_to_storage() returns 0 on success, -1 if too
		 * few descriptors, and ENOMEM if not enough space to save
		 * sensitive pages; the error value steers the realloc
		 * decisions at the top of the next iteration.
		 */
		CPR_DEBUG(CPR_DEBUG1, "compressing pages to storage...\n");
		error = i_cpr_save_to_storage();
		if (error == 0) {
			/* Saving to storage succeeded */
			CPR_DEBUG(CPR_DEBUG1, "compressed %d pages\n",
			    sensitive_pages_saved);
			break;
		} else if (error == -1)
			CPR_DEBUG(CPR_DEBUG1, "%s too few descriptors\n", str);
	}
	/* out of retries while short of descriptors: report as ENOMEM */
	if (error == -1)
		error = ENOMEM;
	return (error);
}


/*
 * Estimate how much memory we will need to save
 * the sensitive pages with compression.
9320Sstevel@tonic-gate */ 9330Sstevel@tonic-gate static caddr_t 9340Sstevel@tonic-gate i_cpr_storage_data_alloc(pgcnt_t pages, pgcnt_t *alloc_pages, int retry_cnt) 9350Sstevel@tonic-gate { 9360Sstevel@tonic-gate pgcnt_t alloc_pcnt, last_pcnt; 9370Sstevel@tonic-gate caddr_t addr; 9380Sstevel@tonic-gate char *str; 9390Sstevel@tonic-gate 9400Sstevel@tonic-gate str = "i_cpr_storage_data_alloc:"; 9410Sstevel@tonic-gate if (retry_cnt == 0) { 9420Sstevel@tonic-gate /* 9430Sstevel@tonic-gate * common compression ratio is about 3:1 9440Sstevel@tonic-gate * initial storage allocation is estimated at 40% 9450Sstevel@tonic-gate * to cover the majority of cases 9460Sstevel@tonic-gate */ 9470Sstevel@tonic-gate alloc_pcnt = INITIAL_ALLOC_PCNT; 9480Sstevel@tonic-gate *alloc_pages = (pages * alloc_pcnt) / INTEGRAL; 9493446Smrj CPR_DEBUG(CPR_DEBUG7, "%s sensitive pages: %ld\n", str, pages); 9503446Smrj CPR_DEBUG(CPR_DEBUG7, 9513446Smrj "%s initial est pages: %ld, alloc %ld%%\n", 9523446Smrj str, *alloc_pages, alloc_pcnt); 9530Sstevel@tonic-gate } else { 9540Sstevel@tonic-gate /* 9550Sstevel@tonic-gate * calculate the prior compression percentage (x100) 9560Sstevel@tonic-gate * from the last attempt to save sensitive pages 9570Sstevel@tonic-gate */ 9580Sstevel@tonic-gate ASSERT(sensitive_pages_saved != 0); 9590Sstevel@tonic-gate last_pcnt = (mmu_btopr(sensitive_size_saved) * INTEGRAL) / 9600Sstevel@tonic-gate sensitive_pages_saved; 9613446Smrj CPR_DEBUG(CPR_DEBUG7, "%s last ratio %ld%%\n", str, last_pcnt); 9620Sstevel@tonic-gate 9630Sstevel@tonic-gate /* 9640Sstevel@tonic-gate * new estimated storage size is based on 9650Sstevel@tonic-gate * the larger ratio + 5% for each retry: 9660Sstevel@tonic-gate * pages * (last + [5%, 10%]) 9670Sstevel@tonic-gate */ 9680Sstevel@tonic-gate alloc_pcnt = MAX(last_pcnt, INITIAL_ALLOC_PCNT) + 9690Sstevel@tonic-gate (retry_cnt * 5); 9700Sstevel@tonic-gate *alloc_pages = (pages * alloc_pcnt) / INTEGRAL; 9713446Smrj CPR_DEBUG(CPR_DEBUG7, "%s Retry est 
pages: %ld, alloc %ld%%\n", 9723446Smrj str, *alloc_pages, alloc_pcnt); 9730Sstevel@tonic-gate } 9740Sstevel@tonic-gate 9750Sstevel@tonic-gate addr = kmem_alloc(mmu_ptob(*alloc_pages), KM_NOSLEEP); 9763446Smrj CPR_DEBUG(CPR_DEBUG7, "%s alloc %ld pages\n", str, *alloc_pages); 9770Sstevel@tonic-gate return (addr); 9780Sstevel@tonic-gate } 9790Sstevel@tonic-gate 9800Sstevel@tonic-gate 9810Sstevel@tonic-gate void 9820Sstevel@tonic-gate i_cpr_storage_free(void) 9830Sstevel@tonic-gate { 9840Sstevel@tonic-gate /* Free descriptors */ 9850Sstevel@tonic-gate if (i_cpr_storage_desc_base) { 9860Sstevel@tonic-gate kmem_free(i_cpr_storage_desc_base, 9870Sstevel@tonic-gate mmu_ptob(i_cpr_storage_desc_pgcnt)); 9880Sstevel@tonic-gate i_cpr_storage_desc_base = NULL; 9890Sstevel@tonic-gate i_cpr_storage_desc_pgcnt = 0; 9900Sstevel@tonic-gate } 9910Sstevel@tonic-gate 9920Sstevel@tonic-gate 9930Sstevel@tonic-gate /* Data storage */ 9940Sstevel@tonic-gate if (i_cpr_storage_data_base) { 9950Sstevel@tonic-gate kmem_free(i_cpr_storage_data_base, 9960Sstevel@tonic-gate mmu_ptob(i_cpr_storage_data_sz)); 9970Sstevel@tonic-gate i_cpr_storage_data_base = NULL; 9980Sstevel@tonic-gate i_cpr_storage_data_sz = 0; 9990Sstevel@tonic-gate } 10000Sstevel@tonic-gate } 10010Sstevel@tonic-gate 10020Sstevel@tonic-gate 10030Sstevel@tonic-gate /* 10040Sstevel@tonic-gate * This routine is derived from cpr_compress_and_write(). 10050Sstevel@tonic-gate * 1. Do bookkeeping in the descriptor for the contiguous sensitive chunk. 10060Sstevel@tonic-gate * 2. Compress and save the clean sensitive pages into the storage area. 
 */
int
i_cpr_compress_and_save(int chunks, pfn_t spfn, pgcnt_t pages)
{
	extern char *cpr_compress_pages(cpd_t *, pgcnt_t, int);
	extern caddr_t i_cpr_storage_data_end;
	uint_t remaining, datalen;
	uint32_t test_usum;
	char *datap;
	csd_t *descp;
	cpd_t cpd;
	int error;

	/*
	 * Fill next empty storage descriptor; chunks is 1-based, so the
	 * descriptor for this chunk is base + chunks - 1.  Running past
	 * the end returns -1, which tells the caller to realloc a larger
	 * descriptor array.
	 */
	descp = i_cpr_storage_desc_base + chunks - 1;
	if (descp >= i_cpr_storage_desc_end) {
		CPR_DEBUG(CPR_DEBUG1, "ran out of descriptors, base 0x%p, "
		    "chunks %d, end 0x%p, descp 0x%p\n",
		    i_cpr_storage_desc_base, chunks,
		    i_cpr_storage_desc_end, descp);
		return (-1);
	}
	/* desc_init marked unused descriptors with (uint_t)-1 (DEBUG) */
	ASSERT(descp->csd_dirty_spfn == (uint_t)-1);
	i_cpr_storage_desc_last_used = descp;

	descp->csd_dirty_spfn = spfn;
	descp->csd_dirty_npages = pages;

	/* temporarily map the chunk; must be paired with mapout below */
	i_cpr_mapin(CPR->c_mapping_area, pages, spfn);

	/*
	 * try compressing pages and copy cpd fields
	 * pfn is copied for debug use
	 */
	cpd.cpd_pfn = spfn;
	datap = cpr_compress_pages(&cpd, pages, C_COMPRESSING);
	datalen = cpd.cpd_length;
	descp->csd_clean_compressed = (cpd.cpd_flag & CPD_COMPRESS);
#ifdef DEBUG
	descp->csd_usum = cpd.cpd_usum;
	descp->csd_csum = cpd.cpd_csum;
#endif

	error = 0;

	/*
	 * Save the raw or compressed data to the storage area pointed to by
	 * sensitive_write_ptr. Make sure the storage space is big enough to
	 * hold the result. Otherwise roll back to increase the storage space.
	 */
	descp->csd_clean_sva = (cpr_ptr)sensitive_write_ptr;
	descp->csd_clean_sz = datalen;
	if ((sensitive_write_ptr + datalen) < i_cpr_storage_data_end) {
		extern void cprbcopy(void *, void *, size_t);

		cprbcopy(datap, sensitive_write_ptr, datalen);
		sensitive_size_saved += datalen;
		sensitive_pages_saved += descp->csd_dirty_npages;
		sensitive_write_ptr += datalen;
	} else {
		/* storage exhausted; ENOMEM prompts the caller to regrow */
		remaining = (i_cpr_storage_data_end - sensitive_write_ptr);
		CPR_DEBUG(CPR_DEBUG1, "i_cpr_compress_and_save: The storage "
		    "space is too small!\ngot %d, want %d\n\n",
		    remaining, (remaining + datalen));
#ifdef DEBUG
		/*
		 * Check to see if the content of the sensitive pages that we
		 * just copied have changed during this small time window.
		 */
		test_usum = checksum32(CPR->c_mapping_area, mmu_ptob(pages));
		descp->csd_usum = cpd.cpd_usum;
		if (test_usum != descp->csd_usum) {
			CPR_DEBUG(CPR_DEBUG1, "\nWARNING: "
			    "i_cpr_compress_and_save: "
			    "Data in the range of pfn 0x%lx to pfn "
			    "0x%lx has changed after they are saved "
			    "into storage.", spfn, (spfn + pages - 1));
		}
#endif
		error = ENOMEM;
	}

	/* release the temporary locked mappings created above */
	i_cpr_mapout(CPR->c_mapping_area, pages);
	return (error);
}


/*
 * This routine is derived from cpr_count_kpages().
 * It goes through kernel data nucleus and segkmem segments to select
 * pages in use and mark them in the corresponding bitmap.
 */
pgcnt_t
i_cpr_count_sensitive_kpages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t kdata_cnt = 0, segkmem_cnt = 0;
	extern caddr_t e_moddata;
	extern struct seg kvalloc;
	extern struct seg kmem64;
	size_t size;

	/*
	 * Kernel data nucleus pages
	 */
	size = e_moddata - s_data;
	kdata_cnt += cpr_count_pages(s_data, size,
	    mapflag, bitfunc, DBG_SHOWRANGE);

	/*
	 * kvseg and kvalloc pages
	 */
	segkmem_cnt += cpr_scan_kvseg(mapflag, bitfunc, &kvseg);
	segkmem_cnt += cpr_count_pages(kvalloc.s_base, kvalloc.s_size,
	    mapflag, bitfunc, DBG_SHOWRANGE);

	/* segment to support kernel memory usage above 32-bit space (4GB) */
	if (kmem64.s_base)
		segkmem_cnt += cpr_count_pages(kmem64.s_base, kmem64.s_size,
		    mapflag, bitfunc, DBG_SHOWRANGE);

	CPR_DEBUG(CPR_DEBUG7, "\ni_cpr_count_sensitive_kpages:\n"
	    "\tkdata_cnt %ld + segkmem_cnt %ld = %ld pages\n",
	    kdata_cnt, segkmem_cnt, kdata_cnt + segkmem_cnt);

	return (kdata_cnt + segkmem_cnt);
}


/*
 * Tag/count the pages backing the CPR storage areas themselves (the
 * descriptor array and the data area), so they can be excluded from
 * the statefile.
 */
pgcnt_t
i_cpr_count_storage_pages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t count = 0;

	if (i_cpr_storage_desc_base) {
		count += cpr_count_pages((caddr_t)i_cpr_storage_desc_base,
		    (size_t)mmu_ptob(i_cpr_storage_desc_pgcnt),
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	if (i_cpr_storage_data_base) {
		count += cpr_count_pages(i_cpr_storage_data_base,
		    (size_t)mmu_ptob(i_cpr_storage_data_sz),
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	return (count);
}


/*
 * Derived from cpr_write_statefile().
 * Allocate (or reallocate after exhausting the supply) descriptors for each
 * chunk of contiguous sensitive kpages.
 *
 * Returns 0 on success with *basepp/*pgsp/*endpp describing the new
 * (zeroed) array, or ENOMEM.
 */
static int
i_cpr_storage_desc_alloc(csd_t **basepp, pgcnt_t *pgsp, csd_t **endpp,
    int retry)
{
	pgcnt_t npages;
	int chunks;
	csd_t *descp, *end;
	size_t len;
	char *str = "i_cpr_storage_desc_alloc:";

	/*
	 * On initial allocation, add some extra to cover overhead caused
	 * by the allocation for the storage area later.
	 */
	if (retry == 0) {
		chunks = cpr_contig_pages(NULL, STORAGE_DESC_ALLOC) +
		    EXTRA_DESCS;
		npages = mmu_btopr(sizeof (**basepp) * (pgcnt_t)chunks);
		CPR_DEBUG(CPR_DEBUG7, "%s chunks %d, ", str, chunks);
	} else {
		/* on retry, grow the array by one page */
		CPR_DEBUG(CPR_DEBUG7, "%s retry %d: ", str, retry);
		npages = *pgsp + 1;
	}
	/* Free old descriptors, if any */
	if (*basepp)
		kmem_free((caddr_t)*basepp, mmu_ptob(*pgsp));

	/*
	 * NOTE(review): if this allocation fails after the old array was
	 * freed, *basepp is NULL but *pgsp still holds the old page count;
	 * callers (i_cpr_storage_free) check *basepp before using *pgsp —
	 * confirm before relying on *pgsp elsewhere.
	 */
	descp = *basepp = kmem_alloc(mmu_ptob(npages), KM_NOSLEEP);
	if (descp == NULL) {
		CPR_DEBUG(CPR_DEBUG7, "%s no space for descriptors!\n", str);
		return (ENOMEM);
	}

	*pgsp = npages;
	len = mmu_ptob(npages);
	end = *endpp = descp + (len / (sizeof (**basepp)));
	CPR_DEBUG(CPR_DEBUG7, "npages 0x%lx, len 0x%lx, items 0x%lx\n\t*basepp "
	    "%p, *endpp %p\n", npages, len, (len / (sizeof (**basepp))),
	    *basepp, *endpp);
	i_cpr_storage_desc_init(descp, npages, end);
	return (0);
}

/*
 * Zero the descriptor array; under DEBUG additionally mark every
 * descriptor unused with an impossible spfn so i_cpr_compress_and_save()
 * can ASSERT a descriptor has not been filled twice.
 */
static void
i_cpr_storage_desc_init(csd_t *descp, pgcnt_t npages, csd_t *end)
{
	size_t len = mmu_ptob(npages);

	/* Initialize the descriptors to something impossible. */
	bzero(descp, len);
#ifdef DEBUG
	/*
	 * This condition is tested by an ASSERT
	 */
	for (; descp < end; descp++)
		descp->csd_dirty_spfn = (uint_t)-1;
#endif
}

/*
 * Write every used storage descriptor (and its saved page data) to the
 * statefile via cpr_dump_sensitive(), updating the dump counters.
 * Returns 0 on success or the first cpr_dump_sensitive() error.
 */
int
i_cpr_dump_sensitive_kpages(vnode_t *vp)
{
	int error = 0;
	uint_t spin_cnt = 0;
	csd_t *descp;

	/*
	 * These following two variables need to be reinitialized
	 * for each cpr cycle.
	 */
	i_cpr_sensitive_bytes_dumped = 0;
	i_cpr_sensitive_pgs_dumped = 0;

	if (i_cpr_storage_desc_base) {
		for (descp = i_cpr_storage_desc_base;
		    descp <= i_cpr_storage_desc_last_used; descp++) {
			if (error = cpr_dump_sensitive(vp, descp))
				return (error);
			/* throttle the console progress spinner */
			spin_cnt++;
			if ((spin_cnt & 0x5F) == 1)
				cpr_spinning_bar();
		}
		prom_printf(" \b");
	}

	CPR_DEBUG(CPR_DEBUG7, "\ni_cpr_dump_sensitive_kpages: dumped %ld\n",
	    i_cpr_sensitive_pgs_dumped);
	return (0);
}


/*
 * 1. Fill the cpr page descriptor with the info of the dirty pages
 *    and write the descriptor out. It will be used at resume.
 * 2. Write the clean data instead of the dirty data out.
 *    Note: to save space, the clean data is already compressed.
 */
static int
cpr_dump_sensitive(vnode_t *vp, csd_t *descp)
{
	int error = 0;
	caddr_t datap;
	cpd_t cpd;	/* cpr page descriptor */
	pfn_t dirty_spfn;
	pgcnt_t dirty_npages;
	size_t clean_sz;
	caddr_t clean_sva;
	int clean_compressed;
	extern uchar_t cpr_pagecopy[];

	dirty_spfn = descp->csd_dirty_spfn;
	dirty_npages = descp->csd_dirty_npages;
	clean_sva = (caddr_t)descp->csd_clean_sva;
	clean_sz = descp->csd_clean_sz;
	clean_compressed = descp->csd_clean_compressed;

	/* Fill cpr page descriptor. */
	cpd.cpd_magic = (uint_t)CPR_PAGE_MAGIC;
	cpd.cpd_pfn = dirty_spfn;
	cpd.cpd_flag = 0;	/* must init to zero */
	cpd.cpd_pages = dirty_npages;

#ifdef DEBUG
	/* propagate the checksums recorded at compress time */
	if ((cpd.cpd_usum = descp->csd_usum) != 0)
		cpd.cpd_flag |= CPD_USUM;
	if ((cpd.cpd_csum = descp->csd_csum) != 0)
		cpd.cpd_flag |= CPD_CSUM;
#endif

	STAT->cs_dumped_statefsz += mmu_ptob(dirty_npages);

	/*
	 * The sensitive kpages are usually saved with compression
	 * unless compression could not reduce the size of the data.
	 * If user choose not to have the statefile compressed,
	 * we need to decompress the data back before dumping it to disk.
	 */
	if (CPR->c_flags & C_COMPRESSING) {
		cpd.cpd_length = clean_sz;
		datap = clean_sva;
		if (clean_compressed)
			cpd.cpd_flag |= CPD_COMPRESS;
	} else {
		if (clean_compressed) {
			cpd.cpd_length = decompress(clean_sva, cpr_pagecopy,
			    clean_sz, mmu_ptob(dirty_npages));
			datap = (caddr_t)cpr_pagecopy;
			ASSERT(cpd.cpd_length == mmu_ptob(dirty_npages));
		} else {
			cpd.cpd_length = clean_sz;
			datap = clean_sva;
		}
		/*
		 * NOTE(review): cpd_csum is zeroed here only in the
		 * non-compressing path; in the compressing path it keeps
		 * the DEBUG value (or is unread when CPD_CSUM is clear in
		 * cpd_flag) — confirm readers consult cpd_flag first.
		 */
		cpd.cpd_csum = 0;
	}

	/* Write cpr page descriptor */
	error = cpr_write(vp, (caddr_t)&cpd, sizeof (cpd));
	if (error) {
		CPR_DEBUG(CPR_DEBUG7, "descp: %p\n", descp);
#ifdef DEBUG
		debug_enter("cpr_dump_sensitive: cpr_write() page "
		    "descriptor failed!\n");
#endif
		return (error);
	}

	i_cpr_sensitive_bytes_dumped += sizeof (cpd_t);

	/* Write page data */
	error = cpr_write(vp, (caddr_t)datap, cpd.cpd_length);
	if (error) {
		CPR_DEBUG(CPR_DEBUG7, "error: %x\n", error);
		CPR_DEBUG(CPR_DEBUG7, "descp: %p\n", descp);
		CPR_DEBUG(CPR_DEBUG7, "cpr_write(%p, %p , %lx)\n", vp, datap,
		    cpd.cpd_length);
#ifdef DEBUG
		debug_enter("cpr_dump_sensitive: cpr_write() data failed!\n");
#endif
		return (error);
	}

	i_cpr_sensitive_bytes_dumped += cpd.cpd_length;
	i_cpr_sensitive_pgs_dumped += dirty_npages;

	return (error);
}


/*
 * Sanity check to make sure that we have dumped right amount
 * of pages from different sources to statefile.
 * Returns 0 when the totals match, EINVAL otherwise.
 */
int
i_cpr_check_pgs_dumped(uint_t pgs_expected, uint_t regular_pgs_dumped)
{
	uint_t total_pgs_dumped;

	total_pgs_dumped = regular_pgs_dumped + i_cpr_sensitive_pgs_dumped;

	CPR_DEBUG(CPR_DEBUG7, "\ncheck_pgs: reg %d + sens %ld = %d, "
	    "expect %d\n\n", regular_pgs_dumped, i_cpr_sensitive_pgs_dumped,
	    total_pgs_dumped, pgs_expected);

	if (pgs_expected == total_pgs_dumped)
		return (0);

	return (EINVAL);
}


/*
 * Leave reusable mode: restore the prom properties recorded in the
 * default file, then invalidate the on-disk copy and clear the
 * reusable flag.  Requires / mounted writable.
 * Returns 0 on success, otherwise an errno value.
 */
int
i_cpr_reusefini(void)
{
	struct vnode *vp;
	cdef_t *cdef;
	size_t size;
	char *bufp;
	int rc;

	if (cpr_reusable_mode)
		cpr_reusable_mode = 0;

	if (rc = cpr_open_deffile(FREAD|FWRITE, &vp)) {
		if (rc == EROFS) {
			cpr_err(CE_CONT, "uadmin A_FREEZE AD_REUSEFINI "
			    "(uadmin %d %d)\nmust be done with / mounted "
			    "writeable.\n", A_FREEZE, AD_REUSEFINI);
		}
		return (rc);
	}

	cdef = kmem_alloc(sizeof (*cdef), KM_SLEEP);
	rc = cpr_rdwr(UIO_READ, vp, cdef, sizeof (*cdef));

	if (rc) {
		cpr_err(CE_WARN, "Failed reading %s, errno = %d",
		    cpr_default_path, rc);
	} else if (cdef->mini.magic != CPR_DEFAULT_MAGIC) {
		/* cpr_enumerate_promprops allocates bufp; free it below */
		cpr_err(CE_WARN, "bad magic number in %s, cannot restore "
		    "prom values for %s", cpr_default_path,
		    cpr_enumerate_promprops(&bufp, &size));
		kmem_free(bufp, size);
		rc = EINVAL;
	} else {
		/*
		 * clean up prom properties
		 */
		rc = cpr_update_nvram(cdef->props);
		if (rc == 0) {
			/*
			 * invalidate the disk copy and turn off reusable
			 */
			cdef->mini.magic = 0;
			cdef->mini.reusable = 0;
			if (rc = cpr_rdwr(UIO_WRITE, vp,
			    &cdef->mini, sizeof (cdef->mini))) {
				cpr_err(CE_WARN, "Failed writing %s, errno %d",
				    cpr_default_path, rc);
			}
		}
	}

	(void) VOP_CLOSE(vp, FREAD|FWRITE, 1, (offset_t)0, CRED(), NULL);
	VN_RELE(vp);
	kmem_free(cdef, sizeof (*cdef));

	return (rc);
14290Sstevel@tonic-gate } 14300Sstevel@tonic-gate 14310Sstevel@tonic-gate 14320Sstevel@tonic-gate int 14330Sstevel@tonic-gate i_cpr_reuseinit(void) 14340Sstevel@tonic-gate { 14350Sstevel@tonic-gate int rc = 0; 14360Sstevel@tonic-gate 14370Sstevel@tonic-gate if (rc = cpr_default_setup(1)) 14380Sstevel@tonic-gate return (rc); 14390Sstevel@tonic-gate 14400Sstevel@tonic-gate /* 14410Sstevel@tonic-gate * We need to validate default file 14420Sstevel@tonic-gate */ 14430Sstevel@tonic-gate rc = cpr_validate_definfo(1); 14440Sstevel@tonic-gate if (rc == 0) 14450Sstevel@tonic-gate cpr_reusable_mode = 1; 14460Sstevel@tonic-gate else if (rc == EROFS) { 14470Sstevel@tonic-gate cpr_err(CE_NOTE, "reuseinit must be performed " 14480Sstevel@tonic-gate "while / is mounted writeable"); 14490Sstevel@tonic-gate } 14500Sstevel@tonic-gate 14510Sstevel@tonic-gate (void) cpr_default_setup(0); 14520Sstevel@tonic-gate 14530Sstevel@tonic-gate return (rc); 14540Sstevel@tonic-gate } 14550Sstevel@tonic-gate 14560Sstevel@tonic-gate 14570Sstevel@tonic-gate int 14580Sstevel@tonic-gate i_cpr_check_cprinfo(void) 14590Sstevel@tonic-gate { 14600Sstevel@tonic-gate struct vnode *vp; 14610Sstevel@tonic-gate cmini_t mini; 14620Sstevel@tonic-gate int rc = 0; 14630Sstevel@tonic-gate 14640Sstevel@tonic-gate if (rc = cpr_open_deffile(FREAD, &vp)) { 14650Sstevel@tonic-gate if (rc == ENOENT) 14660Sstevel@tonic-gate cpr_err(CE_NOTE, "cprinfo file does not " 14670Sstevel@tonic-gate "exist. 
You must run 'uadmin %d %d' " 14680Sstevel@tonic-gate "command while / is mounted writeable,\n" 14690Sstevel@tonic-gate "then reboot and run 'uadmin %d %d' " 14700Sstevel@tonic-gate "to create a reusable statefile", 14710Sstevel@tonic-gate A_FREEZE, AD_REUSEINIT, A_FREEZE, AD_REUSABLE); 14720Sstevel@tonic-gate return (rc); 14730Sstevel@tonic-gate } 14740Sstevel@tonic-gate 14750Sstevel@tonic-gate rc = cpr_rdwr(UIO_READ, vp, &mini, sizeof (mini)); 1476*5331Samw (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL); 14770Sstevel@tonic-gate VN_RELE(vp); 14780Sstevel@tonic-gate 14790Sstevel@tonic-gate if (rc) { 14800Sstevel@tonic-gate cpr_err(CE_WARN, "Failed reading %s, errno = %d", 14810Sstevel@tonic-gate cpr_default_path, rc); 14820Sstevel@tonic-gate } else if (mini.magic != CPR_DEFAULT_MAGIC) { 14830Sstevel@tonic-gate cpr_err(CE_CONT, "bad magic number in cprinfo file.\n" 14840Sstevel@tonic-gate "You must run 'uadmin %d %d' while / is mounted " 14850Sstevel@tonic-gate "writeable, then reboot and run 'uadmin %d %d' " 14860Sstevel@tonic-gate "to create a reusable statefile\n", 14870Sstevel@tonic-gate A_FREEZE, AD_REUSEINIT, A_FREEZE, AD_REUSABLE); 14880Sstevel@tonic-gate rc = EINVAL; 14890Sstevel@tonic-gate } 14900Sstevel@tonic-gate 14910Sstevel@tonic-gate return (rc); 14920Sstevel@tonic-gate } 14930Sstevel@tonic-gate 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate int 14960Sstevel@tonic-gate i_cpr_reusable_supported(void) 14970Sstevel@tonic-gate { 14980Sstevel@tonic-gate return (1); 14990Sstevel@tonic-gate } 15000Sstevel@tonic-gate 15010Sstevel@tonic-gate 15020Sstevel@tonic-gate /* 15030Sstevel@tonic-gate * find prom phys pages and alloc space for a tmp copy 15040Sstevel@tonic-gate */ 15050Sstevel@tonic-gate static int 15060Sstevel@tonic-gate i_cpr_find_ppages(void) 15070Sstevel@tonic-gate { 15080Sstevel@tonic-gate extern struct vnode prom_ppages; 15090Sstevel@tonic-gate struct page *pp; 15100Sstevel@tonic-gate struct memlist *pmem; 15110Sstevel@tonic-gate 
pgcnt_t npages, pcnt, scnt, vcnt; 15120Sstevel@tonic-gate pfn_t ppn, plast, *dst; 15130Sstevel@tonic-gate int mapflag; 15140Sstevel@tonic-gate 15150Sstevel@tonic-gate cpr_clear_bitmaps(); 15160Sstevel@tonic-gate mapflag = REGULAR_BITMAP; 15170Sstevel@tonic-gate 15180Sstevel@tonic-gate /* 15190Sstevel@tonic-gate * there should be a page_t for each phys page used by the kernel; 15200Sstevel@tonic-gate * set a bit for each phys page not tracked by a page_t 15210Sstevel@tonic-gate */ 15220Sstevel@tonic-gate pcnt = 0; 15230Sstevel@tonic-gate memlist_read_lock(); 15240Sstevel@tonic-gate for (pmem = phys_install; pmem; pmem = pmem->next) { 15250Sstevel@tonic-gate npages = mmu_btop(pmem->size); 15260Sstevel@tonic-gate ppn = mmu_btop(pmem->address); 15270Sstevel@tonic-gate for (plast = ppn + npages; ppn < plast; ppn++) { 15280Sstevel@tonic-gate if (page_numtopp_nolock(ppn)) 15290Sstevel@tonic-gate continue; 15300Sstevel@tonic-gate (void) cpr_setbit(ppn, mapflag); 15310Sstevel@tonic-gate pcnt++; 15320Sstevel@tonic-gate } 15330Sstevel@tonic-gate } 15340Sstevel@tonic-gate memlist_read_unlock(); 15350Sstevel@tonic-gate 15360Sstevel@tonic-gate /* 15370Sstevel@tonic-gate * clear bits for phys pages in each segment 15380Sstevel@tonic-gate */ 15390Sstevel@tonic-gate scnt = cpr_count_seg_pages(mapflag, cpr_clrbit); 15400Sstevel@tonic-gate 15410Sstevel@tonic-gate /* 15420Sstevel@tonic-gate * set bits for phys pages referenced by the prom_ppages vnode; 15430Sstevel@tonic-gate * these pages are mostly comprised of forthdebug words 15440Sstevel@tonic-gate */ 15450Sstevel@tonic-gate vcnt = 0; 15460Sstevel@tonic-gate for (pp = prom_ppages.v_pages; pp; ) { 15470Sstevel@tonic-gate if (cpr_setbit(pp->p_offset, mapflag) == 0) 15480Sstevel@tonic-gate vcnt++; 15490Sstevel@tonic-gate pp = pp->p_vpnext; 15500Sstevel@tonic-gate if (pp == prom_ppages.v_pages) 15510Sstevel@tonic-gate break; 15520Sstevel@tonic-gate } 15530Sstevel@tonic-gate 15540Sstevel@tonic-gate /* 15550Sstevel@tonic-gate * total 
number of prom pages are: 15560Sstevel@tonic-gate * (non-page_t pages - seg pages + vnode pages) 15570Sstevel@tonic-gate */ 15580Sstevel@tonic-gate ppage_count = pcnt - scnt + vcnt; 15593446Smrj CPR_DEBUG(CPR_DEBUG1, 15603446Smrj "find_ppages: pcnt %ld - scnt %ld + vcnt %ld = %ld\n", 15613446Smrj pcnt, scnt, vcnt, ppage_count); 15620Sstevel@tonic-gate 15630Sstevel@tonic-gate /* 15640Sstevel@tonic-gate * alloc array of pfn_t to store phys page list 15650Sstevel@tonic-gate */ 15660Sstevel@tonic-gate pphys_list_size = ppage_count * sizeof (pfn_t); 15670Sstevel@tonic-gate pphys_list = kmem_alloc(pphys_list_size, KM_NOSLEEP); 15680Sstevel@tonic-gate if (pphys_list == NULL) { 15690Sstevel@tonic-gate cpr_err(CE_WARN, "cannot alloc pphys_list"); 15700Sstevel@tonic-gate return (ENOMEM); 15710Sstevel@tonic-gate } 15720Sstevel@tonic-gate 15730Sstevel@tonic-gate /* 15740Sstevel@tonic-gate * phys pages referenced in the bitmap should be 15750Sstevel@tonic-gate * those used by the prom; scan bitmap and save 15760Sstevel@tonic-gate * a list of prom phys page numbers 15770Sstevel@tonic-gate */ 15780Sstevel@tonic-gate dst = pphys_list; 15790Sstevel@tonic-gate memlist_read_lock(); 15800Sstevel@tonic-gate for (pmem = phys_install; pmem; pmem = pmem->next) { 15810Sstevel@tonic-gate npages = mmu_btop(pmem->size); 15820Sstevel@tonic-gate ppn = mmu_btop(pmem->address); 15830Sstevel@tonic-gate for (plast = ppn + npages; ppn < plast; ppn++) { 15840Sstevel@tonic-gate if (cpr_isset(ppn, mapflag)) { 15850Sstevel@tonic-gate ASSERT(dst < (pphys_list + ppage_count)); 15860Sstevel@tonic-gate *dst++ = ppn; 15870Sstevel@tonic-gate } 15880Sstevel@tonic-gate } 15890Sstevel@tonic-gate } 15900Sstevel@tonic-gate memlist_read_unlock(); 15910Sstevel@tonic-gate 15920Sstevel@tonic-gate /* 15930Sstevel@tonic-gate * allocate space to store prom pages 15940Sstevel@tonic-gate */ 15950Sstevel@tonic-gate ppage_buf = kmem_alloc(mmu_ptob(ppage_count), KM_NOSLEEP); 15960Sstevel@tonic-gate if (ppage_buf == NULL) { 
15970Sstevel@tonic-gate kmem_free(pphys_list, pphys_list_size); 15980Sstevel@tonic-gate pphys_list = NULL; 15990Sstevel@tonic-gate cpr_err(CE_WARN, "cannot alloc ppage_buf"); 16000Sstevel@tonic-gate return (ENOMEM); 16010Sstevel@tonic-gate } 16020Sstevel@tonic-gate 16030Sstevel@tonic-gate return (0); 16040Sstevel@tonic-gate } 16050Sstevel@tonic-gate 16060Sstevel@tonic-gate 16070Sstevel@tonic-gate /* 16080Sstevel@tonic-gate * save prom pages to kmem pages 16090Sstevel@tonic-gate */ 16100Sstevel@tonic-gate static void 16110Sstevel@tonic-gate i_cpr_save_ppages(void) 16120Sstevel@tonic-gate { 16130Sstevel@tonic-gate pfn_t *pphys, *plast; 16140Sstevel@tonic-gate caddr_t dst; 16150Sstevel@tonic-gate 16160Sstevel@tonic-gate /* 16170Sstevel@tonic-gate * map in each prom page and copy to a kmem page 16180Sstevel@tonic-gate */ 16190Sstevel@tonic-gate dst = ppage_buf; 16200Sstevel@tonic-gate plast = pphys_list + ppage_count; 16210Sstevel@tonic-gate for (pphys = pphys_list; pphys < plast; pphys++) { 16220Sstevel@tonic-gate i_cpr_mapin(cpr_vaddr, 1, *pphys); 16230Sstevel@tonic-gate bcopy(cpr_vaddr, dst, MMU_PAGESIZE); 16240Sstevel@tonic-gate i_cpr_mapout(cpr_vaddr, 1); 16250Sstevel@tonic-gate dst += MMU_PAGESIZE; 16260Sstevel@tonic-gate } 16270Sstevel@tonic-gate 16283446Smrj CPR_DEBUG(CPR_DEBUG1, "saved %ld prom pages\n", ppage_count); 16290Sstevel@tonic-gate } 16300Sstevel@tonic-gate 16310Sstevel@tonic-gate 16320Sstevel@tonic-gate /* 16330Sstevel@tonic-gate * restore prom pages from kmem pages 16340Sstevel@tonic-gate */ 16350Sstevel@tonic-gate static void 16360Sstevel@tonic-gate i_cpr_restore_ppages(void) 16370Sstevel@tonic-gate { 16380Sstevel@tonic-gate pfn_t *pphys, *plast; 16390Sstevel@tonic-gate caddr_t src; 16400Sstevel@tonic-gate 16410Sstevel@tonic-gate dcache_flushall(); 16420Sstevel@tonic-gate 16430Sstevel@tonic-gate /* 16440Sstevel@tonic-gate * map in each prom page and copy from a kmem page 16450Sstevel@tonic-gate */ 16460Sstevel@tonic-gate src = ppage_buf; 
16470Sstevel@tonic-gate plast = pphys_list + ppage_count; 16480Sstevel@tonic-gate for (pphys = pphys_list; pphys < plast; pphys++) { 16490Sstevel@tonic-gate i_cpr_mapin(cpr_vaddr, 1, *pphys); 16500Sstevel@tonic-gate bcopy(src, cpr_vaddr, MMU_PAGESIZE); 16510Sstevel@tonic-gate i_cpr_mapout(cpr_vaddr, 1); 16520Sstevel@tonic-gate src += MMU_PAGESIZE; 16530Sstevel@tonic-gate } 16540Sstevel@tonic-gate 16550Sstevel@tonic-gate dcache_flushall(); 16560Sstevel@tonic-gate 16573446Smrj CPR_DEBUG(CPR_DEBUG1, "restored %ld prom pages\n", ppage_count); 16580Sstevel@tonic-gate } 16590Sstevel@tonic-gate 16600Sstevel@tonic-gate 16610Sstevel@tonic-gate /* 16620Sstevel@tonic-gate * save/restore prom pages or free related allocs 16630Sstevel@tonic-gate */ 16640Sstevel@tonic-gate int 16650Sstevel@tonic-gate i_cpr_prom_pages(int action) 16660Sstevel@tonic-gate { 16670Sstevel@tonic-gate int error; 16680Sstevel@tonic-gate 16690Sstevel@tonic-gate if (action == CPR_PROM_SAVE) { 16700Sstevel@tonic-gate if (ppage_buf == NULL) { 16710Sstevel@tonic-gate ASSERT(pphys_list == NULL); 16720Sstevel@tonic-gate if (error = i_cpr_find_ppages()) 16730Sstevel@tonic-gate return (error); 16740Sstevel@tonic-gate i_cpr_save_ppages(); 16750Sstevel@tonic-gate } 16760Sstevel@tonic-gate } else if (action == CPR_PROM_RESTORE) { 16770Sstevel@tonic-gate i_cpr_restore_ppages(); 16780Sstevel@tonic-gate } else if (action == CPR_PROM_FREE) { 16790Sstevel@tonic-gate if (pphys_list) { 16800Sstevel@tonic-gate ASSERT(pphys_list_size); 16810Sstevel@tonic-gate kmem_free(pphys_list, pphys_list_size); 16820Sstevel@tonic-gate pphys_list = NULL; 16830Sstevel@tonic-gate pphys_list_size = 0; 16840Sstevel@tonic-gate } 16850Sstevel@tonic-gate if (ppage_buf) { 16860Sstevel@tonic-gate ASSERT(ppage_count); 16870Sstevel@tonic-gate kmem_free(ppage_buf, mmu_ptob(ppage_count)); 16883446Smrj CPR_DEBUG(CPR_DEBUG1, "freed %ld prom pages\n", 16893446Smrj ppage_count); 16900Sstevel@tonic-gate ppage_buf = NULL; 16910Sstevel@tonic-gate 
ppage_count = 0; 16920Sstevel@tonic-gate } 16930Sstevel@tonic-gate } 16940Sstevel@tonic-gate return (0); 16950Sstevel@tonic-gate } 16960Sstevel@tonic-gate 16970Sstevel@tonic-gate 16980Sstevel@tonic-gate /* 16990Sstevel@tonic-gate * record tlb data for the nucleus, bigktsb's, and the cpr module; 17000Sstevel@tonic-gate * this data is later used by cprboot to install dtlb/itlb entries. 17010Sstevel@tonic-gate * when we jump into the cpr module during the resume phase, those 17020Sstevel@tonic-gate * mappings are needed until switching to the kernel trap table. 17030Sstevel@tonic-gate * to make the dtte/itte info available during resume, we need 17040Sstevel@tonic-gate * the info recorded prior to saving sensitive pages, otherwise 17050Sstevel@tonic-gate * all the data would appear as NULLs. 17060Sstevel@tonic-gate */ 17070Sstevel@tonic-gate static void 17080Sstevel@tonic-gate i_cpr_save_tlbinfo(void) 17090Sstevel@tonic-gate { 17101772Sjl139090 cti_t cti = {0}; 17110Sstevel@tonic-gate 17120Sstevel@tonic-gate /* 17130Sstevel@tonic-gate * during resume - shortly after jumping into the cpr module, 17140Sstevel@tonic-gate * sfmmu_load_mmustate() will overwrite any dtlb entry at any 17150Sstevel@tonic-gate * index used for TSBs; skip is set so that any saved tte will 17160Sstevel@tonic-gate * target other tlb offsets and prevent being lost during 17170Sstevel@tonic-gate * resume. now scan the dtlb and save locked entries, 17180Sstevel@tonic-gate * then add entries for the tmp stack / data page and the 17190Sstevel@tonic-gate * cpr thread structure. 
17200Sstevel@tonic-gate */ 17210Sstevel@tonic-gate cti.dst = m_info.dtte; 17220Sstevel@tonic-gate cti.tail = cti.dst + CPR_MAX_TLB; 17230Sstevel@tonic-gate cti.reader = dtlb_rd_entry; 17240Sstevel@tonic-gate cti.writer = NULL; 17250Sstevel@tonic-gate cti.filter = i_cpr_lnb; 17260Sstevel@tonic-gate cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1; 17271772Sjl139090 17281772Sjl139090 if (utsb_dtlb_ttenum != -1) 17291772Sjl139090 cti.skip = (1 << utsb_dtlb_ttenum); 17301772Sjl139090 17311772Sjl139090 if (utsb4m_dtlb_ttenum != -1) 17321772Sjl139090 cti.skip |= (1 << utsb4m_dtlb_ttenum); 17331772Sjl139090 17340Sstevel@tonic-gate i_cpr_scan_tlb(&cti); 17350Sstevel@tonic-gate i_cpr_make_tte(&cti, &i_cpr_data_page, datava); 17360Sstevel@tonic-gate i_cpr_make_tte(&cti, curthread, datava); 17370Sstevel@tonic-gate 17380Sstevel@tonic-gate /* 17390Sstevel@tonic-gate * scan itlb and save locked entries; add an entry for 17400Sstevel@tonic-gate * the first text page of the cpr module; cprboot will 17410Sstevel@tonic-gate * jump to that page after restoring kernel pages. 
17420Sstevel@tonic-gate */ 17430Sstevel@tonic-gate cti.dst = m_info.itte; 17440Sstevel@tonic-gate cti.tail = cti.dst + CPR_MAX_TLB; 17450Sstevel@tonic-gate cti.reader = itlb_rd_entry; 17460Sstevel@tonic-gate cti.index = cpunodes[CPU->cpu_id].itlb_size - 1; 17470Sstevel@tonic-gate cti.skip = 0; 17480Sstevel@tonic-gate i_cpr_scan_tlb(&cti); 17490Sstevel@tonic-gate i_cpr_make_tte(&cti, (void *)i_cpr_resume_setup, textva); 17500Sstevel@tonic-gate } 17510Sstevel@tonic-gate 17520Sstevel@tonic-gate 17530Sstevel@tonic-gate /* ARGSUSED */ 17540Sstevel@tonic-gate int 17550Sstevel@tonic-gate i_cpr_dump_setup(vnode_t *vp) 17560Sstevel@tonic-gate { 17570Sstevel@tonic-gate /* 17580Sstevel@tonic-gate * zero out m_info and add info to dtte/itte arrays 17590Sstevel@tonic-gate */ 17600Sstevel@tonic-gate bzero(&m_info, sizeof (m_info)); 17610Sstevel@tonic-gate i_cpr_save_tlbinfo(); 17620Sstevel@tonic-gate return (0); 17630Sstevel@tonic-gate } 17640Sstevel@tonic-gate 17650Sstevel@tonic-gate 17660Sstevel@tonic-gate int 17675295Srandyf i_cpr_is_supported(int sleeptype) 17680Sstevel@tonic-gate { 17690Sstevel@tonic-gate char es_prop[] = "energystar-v2"; 1770789Sahrens pnode_t node; 17710Sstevel@tonic-gate int last; 17720Sstevel@tonic-gate extern int cpr_supported_override; 17730Sstevel@tonic-gate extern int cpr_platform_enable; 17740Sstevel@tonic-gate 17755295Srandyf if (sleeptype != CPR_TODISK) 17765295Srandyf return (0); 17775295Srandyf 17780Sstevel@tonic-gate /* 17790Sstevel@tonic-gate * The next statement tests if a specific platform has turned off 17800Sstevel@tonic-gate * cpr support. 
17810Sstevel@tonic-gate */ 17820Sstevel@tonic-gate if (cpr_supported_override) 17830Sstevel@tonic-gate return (0); 17840Sstevel@tonic-gate 17850Sstevel@tonic-gate /* 17860Sstevel@tonic-gate * Do not inspect energystar-v* property if a platform has 17870Sstevel@tonic-gate * specifically turned on cpr support 17880Sstevel@tonic-gate */ 17890Sstevel@tonic-gate if (cpr_platform_enable) 17900Sstevel@tonic-gate return (1); 17910Sstevel@tonic-gate 17920Sstevel@tonic-gate node = prom_rootnode(); 17930Sstevel@tonic-gate if (prom_getproplen(node, es_prop) != -1) 17940Sstevel@tonic-gate return (1); 17950Sstevel@tonic-gate last = strlen(es_prop) - 1; 17960Sstevel@tonic-gate es_prop[last] = '3'; 17970Sstevel@tonic-gate return (prom_getproplen(node, es_prop) != -1); 17980Sstevel@tonic-gate } 17990Sstevel@tonic-gate 18000Sstevel@tonic-gate 18010Sstevel@tonic-gate /* 18020Sstevel@tonic-gate * the actual size of the statefile data isn't known until after all the 18030Sstevel@tonic-gate * compressed pages are written; even the inode size doesn't reflect the 18040Sstevel@tonic-gate * data size since there are usually many extra fs blocks. for recording 18050Sstevel@tonic-gate * the actual data size, the first sector of the statefile is copied to 18060Sstevel@tonic-gate * a tmp buf, and the copy is later updated and flushed to disk. 
18070Sstevel@tonic-gate */ 18080Sstevel@tonic-gate int 18090Sstevel@tonic-gate i_cpr_blockzero(char *base, char **bufpp, int *blkno, vnode_t *vp) 18100Sstevel@tonic-gate { 18110Sstevel@tonic-gate extern int cpr_flush_write(vnode_t *); 18120Sstevel@tonic-gate static char cpr_sector[DEV_BSIZE]; 18130Sstevel@tonic-gate cpr_ext bytes, *dst; 18140Sstevel@tonic-gate 18150Sstevel@tonic-gate /* 18160Sstevel@tonic-gate * this routine is called after cdd_t and csu_md_t are copied 18170Sstevel@tonic-gate * to cpr_buf; mini-hack alert: the save/update method creates 18180Sstevel@tonic-gate * a dependency on the combined struct size being >= one sector 18190Sstevel@tonic-gate * or DEV_BSIZE; since introduction in Sol2.7, csu_md_t size is 18200Sstevel@tonic-gate * over 1K bytes and will probably grow with any changes. 18210Sstevel@tonic-gate * 18220Sstevel@tonic-gate * copy when vp is NULL, flush when non-NULL 18230Sstevel@tonic-gate */ 18240Sstevel@tonic-gate if (vp == NULL) { 18250Sstevel@tonic-gate ASSERT((*bufpp - base) >= DEV_BSIZE); 18260Sstevel@tonic-gate bcopy(base, cpr_sector, sizeof (cpr_sector)); 18270Sstevel@tonic-gate return (0); 18280Sstevel@tonic-gate } else { 18290Sstevel@tonic-gate bytes = dbtob(*blkno); 18300Sstevel@tonic-gate dst = &((cdd_t *)cpr_sector)->cdd_filesize; 18310Sstevel@tonic-gate bcopy(&bytes, dst, sizeof (bytes)); 18320Sstevel@tonic-gate bcopy(cpr_sector, base, sizeof (cpr_sector)); 18330Sstevel@tonic-gate *bufpp = base + sizeof (cpr_sector); 18340Sstevel@tonic-gate *blkno = cpr_statefile_offset(); 18353446Smrj CPR_DEBUG(CPR_DEBUG1, "statefile data size: %ld\n\n", bytes); 18360Sstevel@tonic-gate return (cpr_flush_write(vp)); 18370Sstevel@tonic-gate } 18380Sstevel@tonic-gate } 18390Sstevel@tonic-gate 18400Sstevel@tonic-gate 18410Sstevel@tonic-gate /* 18420Sstevel@tonic-gate * Allocate bitmaps according to the phys_install list. 
18430Sstevel@tonic-gate */ 18440Sstevel@tonic-gate static int 18450Sstevel@tonic-gate i_cpr_bitmap_setup(void) 18460Sstevel@tonic-gate { 18470Sstevel@tonic-gate struct memlist *pmem; 18480Sstevel@tonic-gate cbd_t *dp, *tail; 18490Sstevel@tonic-gate void *space; 18500Sstevel@tonic-gate size_t size; 18510Sstevel@tonic-gate 18520Sstevel@tonic-gate /* 18530Sstevel@tonic-gate * The number of bitmap descriptors will be the count of 18540Sstevel@tonic-gate * phys_install ranges plus 1 for a trailing NULL struct. 18550Sstevel@tonic-gate */ 18560Sstevel@tonic-gate cpr_nbitmaps = 1; 18570Sstevel@tonic-gate for (pmem = phys_install; pmem; pmem = pmem->next) 18580Sstevel@tonic-gate cpr_nbitmaps++; 18590Sstevel@tonic-gate 18600Sstevel@tonic-gate if (cpr_nbitmaps > (CPR_MAX_BMDESC - 1)) { 18610Sstevel@tonic-gate cpr_err(CE_WARN, "too many physical memory ranges %d, max %d", 18620Sstevel@tonic-gate cpr_nbitmaps, CPR_MAX_BMDESC - 1); 18630Sstevel@tonic-gate return (EFBIG); 18640Sstevel@tonic-gate } 18650Sstevel@tonic-gate 18660Sstevel@tonic-gate /* Alloc an array of bitmap descriptors. 
*/ 18670Sstevel@tonic-gate dp = kmem_zalloc(cpr_nbitmaps * sizeof (*dp), KM_NOSLEEP); 18680Sstevel@tonic-gate if (dp == NULL) { 18690Sstevel@tonic-gate cpr_nbitmaps = 0; 18700Sstevel@tonic-gate return (ENOMEM); 18710Sstevel@tonic-gate } 18720Sstevel@tonic-gate tail = dp + cpr_nbitmaps; 18730Sstevel@tonic-gate 18740Sstevel@tonic-gate CPR->c_bmda = dp; 18750Sstevel@tonic-gate for (pmem = phys_install; pmem; pmem = pmem->next) { 18760Sstevel@tonic-gate size = BITMAP_BYTES(pmem->size); 18770Sstevel@tonic-gate space = kmem_zalloc(size * 2, KM_NOSLEEP); 18780Sstevel@tonic-gate if (space == NULL) 18790Sstevel@tonic-gate return (ENOMEM); 18800Sstevel@tonic-gate ASSERT(dp < tail); 18810Sstevel@tonic-gate dp->cbd_magic = CPR_BITMAP_MAGIC; 18820Sstevel@tonic-gate dp->cbd_spfn = mmu_btop(pmem->address); 18830Sstevel@tonic-gate dp->cbd_epfn = mmu_btop(pmem->address + pmem->size) - 1; 18840Sstevel@tonic-gate dp->cbd_size = size; 18850Sstevel@tonic-gate dp->cbd_reg_bitmap = (cpr_ptr)space; 18860Sstevel@tonic-gate dp->cbd_vlt_bitmap = (cpr_ptr)((caddr_t)space + size); 18870Sstevel@tonic-gate dp++; 18880Sstevel@tonic-gate } 18890Sstevel@tonic-gate 18900Sstevel@tonic-gate /* set magic for the last descriptor */ 18910Sstevel@tonic-gate ASSERT(dp == (tail - 1)); 18920Sstevel@tonic-gate dp->cbd_magic = CPR_BITMAP_MAGIC; 18930Sstevel@tonic-gate 18940Sstevel@tonic-gate return (0); 18950Sstevel@tonic-gate } 18960Sstevel@tonic-gate 18970Sstevel@tonic-gate 18980Sstevel@tonic-gate void 18990Sstevel@tonic-gate i_cpr_bitmap_cleanup(void) 19000Sstevel@tonic-gate { 19010Sstevel@tonic-gate cbd_t *dp; 19020Sstevel@tonic-gate 19030Sstevel@tonic-gate if (CPR->c_bmda == NULL) 19040Sstevel@tonic-gate return; 19050Sstevel@tonic-gate for (dp = CPR->c_bmda; dp->cbd_size; dp++) 19060Sstevel@tonic-gate kmem_free((void *)dp->cbd_reg_bitmap, dp->cbd_size * 2); 19070Sstevel@tonic-gate kmem_free(CPR->c_bmda, cpr_nbitmaps * sizeof (*CPR->c_bmda)); 19080Sstevel@tonic-gate CPR->c_bmda = NULL; 
19090Sstevel@tonic-gate cpr_nbitmaps = 0; 19100Sstevel@tonic-gate } 19110Sstevel@tonic-gate 19120Sstevel@tonic-gate 19130Sstevel@tonic-gate /* 19140Sstevel@tonic-gate * A "regular" and "volatile" bitmap are created for each range of 19150Sstevel@tonic-gate * physical memory. The volatile maps are used to count and track pages 19160Sstevel@tonic-gate * susceptible to heap corruption - caused by drivers that allocate mem 19170Sstevel@tonic-gate * during VOP_DUMP(); the regular maps are used for all the other non- 19180Sstevel@tonic-gate * susceptible pages. Before writing the bitmaps to the statefile, 19190Sstevel@tonic-gate * each bitmap pair gets merged to simplify handling within cprboot. 19200Sstevel@tonic-gate */ 19210Sstevel@tonic-gate int 19220Sstevel@tonic-gate i_cpr_alloc_bitmaps(void) 19230Sstevel@tonic-gate { 19240Sstevel@tonic-gate int err; 19250Sstevel@tonic-gate 19260Sstevel@tonic-gate memlist_read_lock(); 19270Sstevel@tonic-gate err = i_cpr_bitmap_setup(); 19280Sstevel@tonic-gate memlist_read_unlock(); 19290Sstevel@tonic-gate if (err) 19300Sstevel@tonic-gate i_cpr_bitmap_cleanup(); 19310Sstevel@tonic-gate return (err); 19320Sstevel@tonic-gate } 19335295Srandyf 19345295Srandyf 19355295Srandyf 19365295Srandyf /* 19375295Srandyf * Power down the system. 19385295Srandyf */ 19395295Srandyf int 19405295Srandyf i_cpr_power_down(int sleeptype) 19415295Srandyf { 19425295Srandyf int is_defined = 0; 19435295Srandyf char *wordexists = "p\" power-off\" find nip swap l! 
"; 19445295Srandyf char *req = "power-off"; 19455295Srandyf 19465295Srandyf ASSERT(sleeptype == CPR_TODISK); 19475295Srandyf 19485295Srandyf /* 19495295Srandyf * is_defined has value -1 when defined 19505295Srandyf */ 19515295Srandyf prom_interpret(wordexists, (uintptr_t)&is_defined, 0, 0, 0, 0); 19525295Srandyf if (is_defined) { 19535295Srandyf CPR_DEBUG(CPR_DEBUG1, "\ncpr: %s...\n", req); 19545295Srandyf prom_interpret(req, 0, 0, 0, 0, 0); 19555295Srandyf } 19565295Srandyf /* 19575295Srandyf * Only returns if failed 19585295Srandyf */ 19595295Srandyf return (EIO); 19605295Srandyf } 19615295Srandyf 19625295Srandyf void 19635295Srandyf i_cpr_stop_other_cpus(void) 19645295Srandyf { 19655295Srandyf stop_other_cpus(); 19665295Srandyf } 19675295Srandyf 19685295Srandyf /* 19695295Srandyf * Save context for the specified CPU 19705295Srandyf */ 19715295Srandyf /* ARGSUSED */ 19725295Srandyf void * 19735295Srandyf i_cpr_save_context(void *arg) 19745295Srandyf { 19755295Srandyf /* 19765295Srandyf * Not yet 19775295Srandyf */ 19785295Srandyf ASSERT(0); 19795295Srandyf return (NULL); 19805295Srandyf } 19815295Srandyf 19825295Srandyf void 19835295Srandyf i_cpr_pre_resume_cpus(void) 19845295Srandyf { 19855295Srandyf /* 19865295Srandyf * Not yet 19875295Srandyf */ 19885295Srandyf ASSERT(0); 19895295Srandyf } 19905295Srandyf 19915295Srandyf void 19925295Srandyf i_cpr_post_resume_cpus(void) 19935295Srandyf { 19945295Srandyf /* 19955295Srandyf * Not yet 19965295Srandyf */ 19975295Srandyf ASSERT(0); 19985295Srandyf } 19995295Srandyf 20005295Srandyf /* 20015295Srandyf * nothing to do 20025295Srandyf */ 20035295Srandyf void 20045295Srandyf i_cpr_alloc_cpus(void) 20055295Srandyf { 20065295Srandyf } 20075295Srandyf 20085295Srandyf /* 20095295Srandyf * nothing to do 20105295Srandyf */ 20115295Srandyf void 20125295Srandyf i_cpr_free_cpus(void) 20135295Srandyf { 20145295Srandyf } 20155295Srandyf 20165295Srandyf /* ARGSUSED */ 20175295Srandyf void 20185295Srandyf 
i_cpr_save_configuration(dev_info_t *dip) 20195295Srandyf { 20205295Srandyf /* 20215295Srandyf * this is a no-op on sparc 20225295Srandyf */ 20235295Srandyf } 20245295Srandyf 20255295Srandyf /* ARGSUSED */ 20265295Srandyf void 20275295Srandyf i_cpr_restore_configuration(dev_info_t *dip) 20285295Srandyf { 20295295Srandyf /* 20305295Srandyf * this is a no-op on sparc 20315295Srandyf */ 20325295Srandyf } 2033