xref: /onnv-gate/usr/src/uts/sun4u/os/memscrub.c (revision 2895:d18c5b0839cf)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * sun4u Memory Scrubbing
 *
 * On detection of a correctable memory ECC error, the sun4u kernel
 * returns the corrected data to the requester and re-writes it
 * to memory (DRAM).  So if the correctable error was transient,
 * the read has effectively been cleaned (scrubbed) from memory.
 *
 * Scrubbing thus reduces the likelihood that multiple transient errors
 * will occur in the same memory word, making uncorrectable errors due
 * to transients less likely.
 *
 * Thus is born the desire that every memory location be periodically
 * accessed.
 *
 * This file implements a memory scrubbing thread.  This scrubber
 * guarantees that all of physical memory is accessed periodically
 * (memscrub_period_sec -- 12 hours).
 *
 * It attempts to do this as unobtrusively as possible.  The thread
 * schedules itself to wake up at an interval such that if it reads
 * memscrub_span_pages (8MB) on each wakeup, it will read all of physical
 * memory in memscrub_period_sec (12 hours).
 *
 * The scrubber uses the block load hardware to read memory at 268MB/s,
 * so it reads spans of 8MB in 0.03 seconds.  Unlike the original sun4d
 * scrubber the sun4u scrubber does not read ahead if the system is idle
 * because we can read memory very efficiently.
 *
 * The scrubber maintains a private copy of the phys_install memory list
 * to keep track of what memory should be scrubbed.
 *
 * The global routines memscrub_add_span() and memscrub_delete_span() are
 * used to add and delete from this list.  If hotplug memory is later
 * supported these two routines can be used to notify the scrubber of
 * memory configuration changes.
 *
 * The following parameters can be set via /etc/system (see the
 * example entries after this list):
 *
 * memscrub_span_pages = MEMSCRUB_DFL_SPAN_PAGES (8MB)
 * memscrub_period_sec = MEMSCRUB_DFL_PERIOD_SEC (12 hours)
 * memscrub_thread_pri = MEMSCRUB_DFL_THREAD_PRI (MINCLSYSPRI)
 * memscrub_delay_start_sec = (5 minutes)
 * memscrub_verbose = (0)
 * memscrub_override_ticks = (1 tick)
 * disable_memscrub = (0)
 * pause_memscrub = (0)
 * read_all_memscrub = (0)
 *
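 * As an illustration (the values here are hypothetical, not tuning
 * advice), overrides use the usual /etc/system "set" syntax, e.g.:
 *
 *	set memscrub_period_sec = 21600
 *	set memscrub_verbose = 1
 *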
 * The scrubber will print NOTICE messages describing what it is
 * doing if "memscrub_verbose" is set.
 *
 * If the scrubber's sleep time calculation drops to zero ticks,
 * memscrub_override_ticks will be used as the sleep time instead. The
 * sleep time should only drop to zero on a system with over 32.95
 * terabytes of memory, or where the default scrubber parameters have
 * been adjusted. For example, reducing memscrub_span_pages or
 * memscrub_period_sec causes the sleep time to drop to zero with less
 * memory. Note that since the sleep time is calculated in clock ticks,
 * using hires clock ticks allows for more memory before the sleep time
 * becomes zero.
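 *
 * To see where the 32.95 terabyte figure comes from (a sketch, assuming
 * the common hz value of 100): period_ticks = 12 * 60 * 60 * 100 =
 * 4,320,000.  The interval is period_ticks / (phys_pages / span_pages),
 * which truncates to zero once memory exceeds 4,320,000 spans of 8MB
 * each -- about 34,560,000 MB, or roughly 32.95 terabytes.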
 *
 * The scrubber will exit (or never be started) if it finds the variable
 * "disable_memscrub" set.
 *
 * The scrubber will pause (not read memory) when "pause_memscrub"
 * is set.  It will check the state of pause_memscrub at each wakeup
 * period.  The scrubber will not make up for lost time.  If you
 * pause the scrubber for a prolonged period of time you can use
 * the "read_all_memscrub" switch (see below) to catch up. In addition,
 * pause_memscrub is used internally by the post memory DR callbacks.
 * It is set for the small period of time during which the callbacks
 * are executing. This ensures "memscrub_lock" will be released,
 * allowing the callbacks to finish.
 *
 * The scrubber will read all memory if "read_all_memscrub" is set.
 * The normal span read will also occur during the wakeup.
 *
 * MEMSCRUB_MIN_PAGES (32MB) is the minimum amount of memory a system
 * must have before we'll start the scrubber.
 *
 * MEMSCRUB_DFL_SPAN_PAGES (8MB) is based on the guess that 0.03 sec
 * is a "good" amount of minimum time for the thread to run at a time.
 *
 * MEMSCRUB_DFL_PERIOD_SEC (12 hours) is nearly a total guess --
 * twice the frequency the hardware folk estimated would be necessary.
 *
 * MEMSCRUB_DFL_THREAD_PRI (MINCLSYSPRI) is based on the assumption
 * that the scrubber should get its fair share of time (since it
 * is short).  At a priority of 0 the scrubber will be starved.
 */

#include <sys/systm.h>		/* timeout, types, t_lock */
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>	/* MIN */
#include <sys/memlist.h>	/* memlist */
#include <sys/mem_config.h>	/* memory add/delete */
#include <sys/kmem.h>		/* KMEM_NOSLEEP */
#include <sys/cpuvar.h>		/* ncpus_online */
#include <sys/debug.h>		/* ASSERTs */
#include <sys/machsystm.h>	/* lddphys */
#include <sys/cpu_module.h>	/* vtag_flushpage */
#include <sys/kstat.h>
#include <sys/atomic.h>		/* atomic_add_32 */

#include <vm/hat.h>
#include <vm/seg_kmem.h>
#include <vm/hat_sfmmu.h>	/* XXX FIXME - delete */

#include <sys/time.h>
#include <sys/callb.h>		/* CPR callback */
#include <sys/ontrap.h>

/*
 * Should really have paddr_t defined, but it is broken.  Use
 * ms_paddr_t in the meantime to make the code cleaner.
 */
typedef uint64_t ms_paddr_t;

/*
 * Global Routines:
 */
int memscrub_add_span(pfn_t pfn, pgcnt_t pages);
int memscrub_delete_span(pfn_t pfn, pgcnt_t pages);
int memscrub_init(void);
void memscrub_induced_error(void);

/*
 * Global Data:
 */

/*
 * scrub if we have at least this many pages
 */
#define	MEMSCRUB_MIN_PAGES (32 * 1024 * 1024 / PAGESIZE)

/*
 * scan all of physical memory at least once every MEMSCRUB_DFL_PERIOD_SEC
 */
#define	MEMSCRUB_DFL_PERIOD_SEC	(12 * 60 * 60)	/* 12 hours */

/*
 * scan at least MEMSCRUB_DFL_SPAN_PAGES each iteration
 */
#define	MEMSCRUB_DFL_SPAN_PAGES	((8 * 1024 * 1024) / PAGESIZE)

/*
 * almost anything is higher priority than scrubbing
 */
#define	MEMSCRUB_DFL_THREAD_PRI	MINCLSYSPRI

/*
 * size used when scanning memory
 */
#define	MEMSCRUB_BLOCK_SIZE		256
#define	MEMSCRUB_BLOCK_SIZE_SHIFT	8	/* log2(MEMSCRUB_BLOCK_SIZE) */
#define	MEMSCRUB_BLOCKS_PER_PAGE	(PAGESIZE >> MEMSCRUB_BLOCK_SIZE_SHIFT)

#define	MEMSCRUB_BPP4M		(MMU_PAGESIZE4M >> MEMSCRUB_BLOCK_SIZE_SHIFT)
#define	MEMSCRUB_BPP512K	(MMU_PAGESIZE512K >> MEMSCRUB_BLOCK_SIZE_SHIFT)
#define	MEMSCRUB_BPP64K		(MMU_PAGESIZE64K >> MEMSCRUB_BLOCK_SIZE_SHIFT)
#define	MEMSCRUB_BPP		(MMU_PAGESIZE >> MEMSCRUB_BLOCK_SIZE_SHIFT)
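
/*
 * For concreteness (assuming the sun4u 8K base page): a base page holds
 * 8192 / 256 = 32 scrub blocks (MEMSCRUB_BLOCKS_PER_PAGE), and a 4M
 * page holds 4194304 / 256 = 16384 blocks (MEMSCRUB_BPP4M).
 */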

/*
 * This message indicates that we have exceeded the limitations of
 * the memscrubber. See the comments above regarding what would
 * cause the sleep time to become zero. In DEBUG mode, this message
 * is logged on the console and in the messages file. In non-DEBUG
 * mode, it is only logged in the messages file.
 */
#ifdef DEBUG
#define	MEMSCRUB_OVERRIDE_MSG	"Memory scrubber sleep time is zero " \
	"seconds, consuming entire CPU."
#else
#define	MEMSCRUB_OVERRIDE_MSG	"!Memory scrubber sleep time is zero " \
	"seconds, consuming entire CPU."
#endif /* DEBUG */

/*
 * we can patch these defaults in /etc/system if necessary
 */
uint_t disable_memscrub = 0;
uint_t pause_memscrub = 0;
uint_t read_all_memscrub = 0;
uint_t memscrub_verbose = 0;
uint_t memscrub_all_idle = 0;
uint_t memscrub_span_pages = MEMSCRUB_DFL_SPAN_PAGES;
uint_t memscrub_period_sec = MEMSCRUB_DFL_PERIOD_SEC;
uint_t memscrub_thread_pri = MEMSCRUB_DFL_THREAD_PRI;
uint_t memscrub_delay_start_sec = 5 * 60;
uint_t memscrub_override_ticks = 1;

/*
 * Static Routines
 */
static void memscrubber(void);
static void memscrub_cleanup(void);
static int memscrub_add_span_gen(pfn_t, pgcnt_t, struct memlist **, uint_t *);
static int memscrub_verify_span(ms_paddr_t *addrp, pgcnt_t *pagesp);
static void memscrub_scan(uint_t blks, ms_paddr_t src);

/*
 * Static Data
 */

static struct memlist *memscrub_memlist;
static uint_t memscrub_phys_pages;

static kcondvar_t memscrub_cv;
static kmutex_t memscrub_lock;
/*
 * memscrub_lock protects memscrub_memlist, interval_ticks, cprinfo, ...
 */
static void memscrub_init_mem_config(void);
static void memscrub_uninit_mem_config(void);

/*
 * Linked list of memscrub-aware spans that contain retired pages.
 * Currently enabled only on sun4u USIII-based platforms.
 */
typedef struct memscrub_page_retire_span {
	ms_paddr_t				address;
	struct memscrub_page_retire_span	*next;
} memscrub_page_retire_span_t;

static memscrub_page_retire_span_t *memscrub_page_retire_span_list = NULL;

static void memscrub_page_retire_span_add(ms_paddr_t);
static void memscrub_page_retire_span_delete(ms_paddr_t);
static int memscrub_page_retire_span_search(ms_paddr_t);
static void memscrub_page_retire_span_list_update(void);

/*
 * add_to_page_retire_list: Set by cpu_async_log_err() calling
 * memscrub_induced_error() when a CE/UE occurs on a retired page due
 * to a memscrub read.  Cleared by memscrub after it updates the global
 * page retire span list.  Piggybacks on the protection of memscrub_lock,
 * which is held across both the set and the clear.
 * Note: when cpu_async_log_err() calls memscrub_induced_error(), it runs
 * in softint context, fired on the cpu on which the memscrub thread is
 * currently running.  The memscrub thread has its affinity set during
 * memscrub_read(), so migration to a new cpu is not expected.
 */
static int add_to_page_retire_list = 0;

/*
 * Keep track of some interesting statistics
 */
static struct memscrub_kstats {
	kstat_named_t	done_early;	/* ahead of schedule */
	kstat_named_t	early_sec;	/* by cumulative num secs */
	kstat_named_t	done_late;	/* behind schedule */
	kstat_named_t	late_sec;	/* by cumulative num secs */
	kstat_named_t	interval_ticks;	/* num ticks between intervals */
	kstat_named_t	force_run;	/* forced to run, non-timeout */
	kstat_named_t	errors_found;	/* num errors found by memscrub */
} memscrub_counts = {
	{ "done_early",		KSTAT_DATA_UINT32 },
	{ "early_sec",		KSTAT_DATA_UINT32 },
	{ "done_late",		KSTAT_DATA_UINT32 },
	{ "late_sec",		KSTAT_DATA_UINT32 },
	{ "interval_ticks",	KSTAT_DATA_UINT32 },
	{ "force_run",		KSTAT_DATA_UINT32 },
	{ "errors_found",	KSTAT_DATA_UINT32 },
};
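
/*
 * Once installed, these counters can be inspected from userland with
 * kstat(1M) -- e.g. something like "kstat -m unix -n memscrub_kstat"
 * (the module, instance and name follow the kstat_create() call in
 * memscrub_init() below).
 */
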
static struct kstat *memscrub_ksp = (struct kstat *)NULL;

static timeout_id_t memscrub_tid = 0;	/* keep track of timeout id */

/*
 * create memscrub_memlist from phys_install list
 * initialize locks, set memscrub_phys_pages.
 */
int
memscrub_init(void)
{
	struct memlist *src;

	/*
	 * only startup the scrubber if we have a minimum
	 * number of pages
	 */
	if (physinstalled >= MEMSCRUB_MIN_PAGES) {

		/*
		 * initialize locks
		 */
		mutex_init(&memscrub_lock, NULL, MUTEX_DRIVER, NULL);
		cv_init(&memscrub_cv, NULL, CV_DRIVER, NULL);

		/*
		 * copy phys_install to memscrub_memlist
		 */
		for (src = phys_install; src; src = src->next) {
			if (memscrub_add_span(
			    (pfn_t)(src->address >> PAGESHIFT),
			    (pgcnt_t)(src->size >> PAGESHIFT))) {
				memscrub_cleanup();
				return (-1);
			}
		}

		/*
		 * initialize kstats
		 */
		memscrub_ksp = kstat_create("unix", 0, "memscrub_kstat",
		    "misc", KSTAT_TYPE_NAMED,
		    sizeof (memscrub_counts) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE);

		if (memscrub_ksp) {
			memscrub_ksp->ks_data = (void *)&memscrub_counts;
			kstat_install(memscrub_ksp);
		} else {
			cmn_err(CE_NOTE, "Memscrubber cannot create kstats");
		}

		/*
		 * create memscrubber thread
		 */
		(void) thread_create(NULL, 0, (void (*)())memscrubber,
		    NULL, 0, &p0, TS_RUN, memscrub_thread_pri);

		/*
		 * We don't want callbacks changing the list
		 * if there is no thread running. We do not
		 * attempt to deal with stopping/starting scrubbing
		 * on memory size changes.
		 */
		memscrub_init_mem_config();
	}

	return (0);
}

static void
memscrub_cleanup(void)
{
	memscrub_uninit_mem_config();
	while (memscrub_memlist) {
		(void) memscrub_delete_span(
		    (pfn_t)(memscrub_memlist->address >> PAGESHIFT),
		    (pgcnt_t)(memscrub_memlist->size >> PAGESHIFT));
	}
	if (memscrub_ksp)
		kstat_delete(memscrub_ksp);
	cv_destroy(&memscrub_cv);
	mutex_destroy(&memscrub_lock);
}

#ifdef MEMSCRUB_DEBUG
static void
memscrub_printmemlist(char *title, struct memlist *listp)
{
	struct memlist *list;

	cmn_err(CE_CONT, "%s:\n", title);

	for (list = listp; list; list = list->next) {
		cmn_err(CE_CONT, "addr = 0x%llx, size = 0x%llx\n",
		    list->address, list->size);
	}
}
#endif /* MEMSCRUB_DEBUG */

/* ARGSUSED */
static void
memscrub_wakeup(void *c)
{
	/*
	 * grab mutex to guarantee that our wakeup call
	 * arrives after we go to sleep -- so we can't sleep forever.
	 */
	mutex_enter(&memscrub_lock);
	cv_signal(&memscrub_cv);
	mutex_exit(&memscrub_lock);
}

/*
 * provide an interface external to the memscrubber
 * which will force the memscrub thread to run vs.
 * waiting for the timeout, if one is set
 */
void
memscrub_run(void)
{
	memscrub_counts.force_run.value.ui32++;
	if (memscrub_tid) {
		(void) untimeout(memscrub_tid);
		memscrub_wakeup((void *)NULL);
	}
}

/*
 * this calculation doesn't account for the time
 * that the actual scan consumes -- so we'd fall
 * slightly behind schedule with this interval.
 * It's very small.
 */

static uint_t
compute_interval_ticks(void)
{
	/*
	 * We use msp_safe and mpp_safe below to ensure somebody
	 * doesn't set memscrub_span_pages or memscrub_phys_pages
	 * to 0 on us while we are using them.
	 */
	static uint_t msp_safe, mpp_safe;
	static uint_t interval_ticks, period_ticks;
	msp_safe = memscrub_span_pages;
	mpp_safe = memscrub_phys_pages;

	period_ticks = memscrub_period_sec * hz;
	interval_ticks = period_ticks;

	ASSERT(mutex_owned(&memscrub_lock));

	if ((msp_safe != 0) && (mpp_safe != 0)) {
		if (mpp_safe <= msp_safe) {
			interval_ticks = period_ticks;
		} else {
			interval_ticks = (period_ticks /
			    (mpp_safe / msp_safe));
		}
	}
	return (interval_ticks);
}
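
/*
 * To make the schedule concrete (an illustrative example, assuming
 * hz = 100 and the sun4u 8K base page): on an 8GB machine,
 * memscrub_phys_pages is 1048576 and memscrub_span_pages is 1024, so
 * interval_ticks = (43200 * 100) / (1048576 / 1024) = 4218 ticks --
 * the thread wakes roughly every 42 seconds to read one 8MB span.
 */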

void
memscrubber(void)
{
	ms_paddr_t address, addr;
	time_t deadline;
	pgcnt_t pages;
	uint_t reached_end = 1;
	uint_t paused_message = 0;
	uint_t interval_ticks = 0;
	uint_t sleep_warn_printed = 0;
	callb_cpr_t cprinfo;

	/*
	 * notify CPR of our existence
	 */
	CALLB_CPR_INIT(&cprinfo, &memscrub_lock, callb_generic_cpr, "memscrub");

	mutex_enter(&memscrub_lock);

	if (memscrub_memlist == NULL) {
		cmn_err(CE_WARN, "memscrub_memlist not initialized.");
		goto memscrub_exit;
	}

	address = memscrub_memlist->address;

	deadline = gethrestime_sec() + memscrub_delay_start_sec;

	for (;;) {
		if (disable_memscrub)
			break;

		/*
		 * compute interval_ticks
		 */
		interval_ticks = compute_interval_ticks();

		/*
		 * If the calculated sleep time is zero, and pause_memscrub
		 * has been set, make sure we sleep so that another thread
		 * can acquire memscrub_lock.
		 */
		if (interval_ticks == 0 && pause_memscrub) {
			interval_ticks = hz;
		}

		/*
		 * And as a fail safe, under normal non-paused operation, do
		 * not allow the sleep time to be zero.
		 */
		if (interval_ticks == 0) {
			interval_ticks = memscrub_override_ticks;
			if (!sleep_warn_printed) {
				cmn_err(CE_NOTE, MEMSCRUB_OVERRIDE_MSG);
				sleep_warn_printed = 1;
			}
		}

		memscrub_counts.interval_ticks.value.ui32 = interval_ticks;

		/*
		 * Did we just reach the end of memory? If we are at the
		 * end of memory, delay end of memory processing until
		 * pause_memscrub is not set.
		 */
		if (reached_end && !pause_memscrub) {
			time_t now = gethrestime_sec();

			if (now >= deadline) {
				memscrub_counts.done_late.value.ui32++;
				memscrub_counts.late_sec.value.ui32 +=
				    (now - deadline);
				/*
				 * past deadline, start right away
				 */
				interval_ticks = 0;

				deadline = now + memscrub_period_sec;
			} else {
				/*
				 * we finished ahead of schedule.
				 * wait till previous deadline before re-start.
				 */
				interval_ticks = (deadline - now) * hz;
				memscrub_counts.done_early.value.ui32++;
				memscrub_counts.early_sec.value.ui32 +=
				    (deadline - now);
				deadline += memscrub_period_sec;
			}
			reached_end = 0;
			sleep_warn_printed = 0;
		}

		if (interval_ticks != 0) {
			/*
			 * it is safe from our standpoint for CPR to
			 * suspend the system
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);

			/*
			 * hit the snooze bar
			 */
			memscrub_tid = timeout(memscrub_wakeup, NULL,
			    interval_ticks);

			/*
			 * go to sleep
			 */
			cv_wait(&memscrub_cv, &memscrub_lock);

			/*
			 * at this point, no timeout should be set
			 */
			memscrub_tid = 0;

			/*
			 * we need to go to work and will be modifying
			 * our internal state and mapping/unmapping
			 * TTEs
			 */
			CALLB_CPR_SAFE_END(&cprinfo, &memscrub_lock);
		}

		if (memscrub_phys_pages == 0) {
			cmn_err(CE_WARN, "Memory scrubber has 0 pages to read");
			goto memscrub_exit;
		}

		if (!pause_memscrub) {
			if (paused_message) {
				paused_message = 0;
				if (memscrub_verbose)
					cmn_err(CE_NOTE, "Memory scrubber "
					    "resuming");
			}

			if (read_all_memscrub) {
				if (memscrub_verbose)
					cmn_err(CE_NOTE, "Memory scrubber "
					    "reading all memory per request");

				addr = memscrub_memlist->address;
				reached_end = 0;
				while (!reached_end) {
					if (disable_memscrub)
						break;
					pages = memscrub_phys_pages;
					reached_end = memscrub_verify_span(
					    &addr, &pages);
					memscrub_scan(pages *
					    MEMSCRUB_BLOCKS_PER_PAGE, addr);
					addr += ((uint64_t)pages * PAGESIZE);
				}
				read_all_memscrub = 0;
			}

			/*
			 * read 1 span
			 */
			pages = memscrub_span_pages;

			if (disable_memscrub)
				break;

			/*
			 * determine physical address range
			 */
			reached_end = memscrub_verify_span(&address,
			    &pages);

			memscrub_scan(pages * MEMSCRUB_BLOCKS_PER_PAGE,
			    address);

			address += ((uint64_t)pages * PAGESIZE);
		}

		if (pause_memscrub && !paused_message) {
			paused_message = 1;
			if (memscrub_verbose)
				cmn_err(CE_NOTE, "Memory scrubber paused");
		}
	}

memscrub_exit:
	cmn_err(CE_NOTE, "Memory scrubber exiting");
	CALLB_CPR_EXIT(&cprinfo);
	memscrub_cleanup();
	thread_exit();
	/* NOTREACHED */
}

/*
 * condition address and size
 * such that they span legal physical addresses.
 *
 * when appropriate, address will be rounded up to start of next
 * struct memlist, and pages will be rounded down to the end of the
 * memlist size.
 *
 * returns 1 if reached end of list, else returns 0.
 */
static int
memscrub_verify_span(ms_paddr_t *addrp, pgcnt_t *pagesp)
{
	struct memlist *mlp;
	ms_paddr_t address = *addrp;
	uint64_t bytes = (uint64_t)*pagesp * PAGESIZE;
	uint64_t bytes_remaining;
	int reached_end = 0;

	ASSERT(mutex_owned(&memscrub_lock));

	/*
	 * find memlist struct that contains addrp
	 * assumes memlist is sorted by ascending address.
	 */
	for (mlp = memscrub_memlist; mlp != NULL; mlp = mlp->next) {
		/*
		 * if before this chunk, round up to beginning
		 */
		if (address < mlp->address) {
			address = mlp->address;
			break;
		}
		/*
		 * if before end of chunk, then we found it
		 */
		if (address < (mlp->address + mlp->size))
			break;

		/* else go to next struct memlist */
	}
	/*
	 * if we hit end of list, start at beginning
	 */
	if (mlp == NULL) {
		mlp = memscrub_memlist;
		address = mlp->address;
	}

	/*
	 * now we have legal address, and its mlp, condition bytes
	 */
	bytes_remaining = (mlp->address + mlp->size) - address;

	if (bytes > bytes_remaining)
		bytes = bytes_remaining;

	/*
	 * will this span take us to end of list?
	 */
	if ((mlp->next == NULL) &&
	    ((mlp->address + mlp->size) == (address + bytes)))
		reached_end = 1;

	/* return values */
	*addrp = address;
	*pagesp = bytes / PAGESIZE;

	return (reached_end);
}

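/*
 * An illustrative example of the conditioning above (hypothetical
 * addresses): given memlist entries [0x0, 0x40000000) and
 * [0x80000000, 0xc0000000), an 8MB request starting at 0x3ff00000 is
 * trimmed to the 1MB remaining in the first entry, while a request
 * starting at 0x50000000 (in the hole) is rounded up to begin at
 * 0x80000000.
 */
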
/*
 * add a span to the memscrub list
 * add to memscrub_phys_pages
 */
int
memscrub_add_span(pfn_t pfn, pgcnt_t pages)
{
#ifdef MEMSCRUB_DEBUG
	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
#endif /* MEMSCRUB_DEBUG */

	int retval;

	mutex_enter(&memscrub_lock);

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist before", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
	cmn_err(CE_CONT, "memscrub_add_span: address: 0x%llx"
	    " size: 0x%llx\n", address, bytes);
#endif /* MEMSCRUB_DEBUG */

	retval = memscrub_add_span_gen(pfn, pages, &memscrub_memlist,
	    &memscrub_phys_pages);

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist after", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
#endif /* MEMSCRUB_DEBUG */

	mutex_exit(&memscrub_lock);

	return (retval);
}

static int
memscrub_add_span_gen(
	pfn_t pfn,
	pgcnt_t pages,
	struct memlist **list,
	uint_t *npgs)
{
	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
	struct memlist *dst;
	struct memlist *prev, *next;
	int retval = 0;

	/*
	 * allocate a new struct memlist
	 */

	dst = (struct memlist *)
	    kmem_alloc(sizeof (struct memlist), KM_NOSLEEP);

	if (dst == NULL) {
		retval = -1;
		goto add_done;
	}

	dst->address = address;
	dst->size = bytes;

	/*
	 * first insert
	 */
	if (*list == NULL) {
		dst->prev = NULL;
		dst->next = NULL;
		*list = dst;

		goto add_done;
	}

	/*
	 * insert into sorted list
	 */
	for (prev = NULL, next = *list;
	    next != NULL;
	    prev = next, next = next->next) {
		if (address > (next->address + next->size))
			continue;

		/*
		 * else insert here
		 */

		/*
		 * prepend to next
		 */
		if ((address + bytes) == next->address) {
			kmem_free(dst, sizeof (struct memlist));

			next->address = address;
			next->size += bytes;

			goto add_done;
		}

		/*
		 * append to next
		 */
		if (address == (next->address + next->size)) {
			kmem_free(dst, sizeof (struct memlist));

			if (next->next) {
				/*
				 * don't overlap with next->next
				 */
				if ((address + bytes) > next->next->address) {
					retval = -1;
					goto add_done;
				}
				/*
				 * concatenate next and next->next
				 */
				if ((address + bytes) == next->next->address) {
					struct memlist *mlp = next->next;

					if (next == *list)
						*list = next->next;

					mlp->address = next->address;
					mlp->size += next->size;
					mlp->size += bytes;

					if (next->prev)
						next->prev->next = mlp;
					mlp->prev = next->prev;

					kmem_free(next,
					    sizeof (struct memlist));
					goto add_done;
				}
			}

			next->size += bytes;

			goto add_done;
		}

		/* don't overlap with next */
		if ((address + bytes) > next->address) {
			retval = -1;
			kmem_free(dst, sizeof (struct memlist));
			goto add_done;
		}

		/*
		 * insert before next
		 */
		dst->prev = prev;
		dst->next = next;
		next->prev = dst;
		if (prev == NULL) {
			*list = dst;
		} else {
			prev->next = dst;
		}
		goto add_done;
	}	/* end for */

	/*
	 * end of list, prev is valid and next is NULL
	 */
	prev->next = dst;
	dst->prev = prev;
	dst->next = NULL;

add_done:

	if (retval != -1)
		*npgs += pages;

	return (retval);
}
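
/*
 * For example (hypothetical addresses): if the list holds the single
 * entry [0x0, 0x800000) and a span covering [0x800000, 0x1000000) is
 * added, the "append to next" case above coalesces the two into one
 * entry [0x0, 0x1000000) instead of allocating a second struct memlist.
 */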

/*
 * delete a span from the memscrub list
 * subtract from memscrub_phys_pages
 */
int
memscrub_delete_span(pfn_t pfn, pgcnt_t pages)
{
	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
	struct memlist *dst, *next;
	int retval = 0;

	mutex_enter(&memscrub_lock);

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist Before", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
	cmn_err(CE_CONT, "memscrub_delete_span: 0x%llx 0x%llx\n",
	    address, bytes);
#endif /* MEMSCRUB_DEBUG */

	/*
	 * find struct memlist containing page
	 */
	for (next = memscrub_memlist; next != NULL; next = next->next) {
		if ((address >= next->address) &&
		    (address < next->address + next->size))
			break;
	}

	/*
	 * if start address not in list
	 */
	if (next == NULL) {
		retval = -1;
		goto delete_done;
	}

	/*
	 * error if size goes off end of this struct memlist
	 */
	if (address + bytes > next->address + next->size) {
		retval = -1;
		goto delete_done;
	}

	/*
	 * pages at beginning of struct memlist
	 */
	if (address == next->address) {
		/*
		 * if start & size match, delete from list
		 */
		if (bytes == next->size) {
			if (next == memscrub_memlist)
				memscrub_memlist = next->next;
			if (next->prev != NULL)
				next->prev->next = next->next;
			if (next->next != NULL)
				next->next->prev = next->prev;

			kmem_free(next, sizeof (struct memlist));
		} else {
			/*
			 * increment start address by bytes
			 */
			next->address += bytes;
			next->size -= bytes;
		}
		goto delete_done;
	}

	/*
	 * pages at end of struct memlist
	 */
	if (address + bytes == next->address + next->size) {
		/*
		 * decrement size by bytes
		 */
		next->size -= bytes;
		goto delete_done;
	}

	/*
	 * delete a span in the middle of the struct memlist
	 */
	{
		/*
		 * create a new struct memlist
		 */
		dst = (struct memlist *)
		    kmem_alloc(sizeof (struct memlist), KM_NOSLEEP);

		if (dst == NULL) {
			retval = -1;
			goto delete_done;
		}

		/*
		 * the existing struct memlist keeps the address and size
		 * up to the start of the deleted span; the new struct
		 * memlist covers everything after the deleted span,
		 * until the end
		 */
		dst->address = address + bytes;
		dst->size = (next->address + next->size) - dst->address;
		next->size = address - next->address;

		/*
		 * link in new memlist after old
		 */
		dst->next = next->next;
		dst->prev = next;

		if (next->next != NULL)
			next->next->prev = dst;
		next->next = dst;
	}

delete_done:
	if (retval != -1) {
		memscrub_phys_pages -= pages;
		if (memscrub_phys_pages == 0)
			disable_memscrub = 1;
	}

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist After", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
#endif /* MEMSCRUB_DEBUG */

	mutex_exit(&memscrub_lock);
	return (retval);
}
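
/*
 * For instance (hypothetical addresses): deleting [0x200000, 0x300000)
 * from an entry covering [0x0, 0x400000) takes the middle-split path
 * above, leaving [0x0, 0x200000) in the existing entry and allocating
 * a new entry for [0x300000, 0x400000).
 */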

static void
memscrub_scan(uint_t blks, ms_paddr_t src)
{
	uint_t		psz, bpp, pgsread;
	pfn_t		pfn;
	ms_paddr_t	pa;
	caddr_t		va;
	on_trap_data_t	otd;
	int		scan_mmu_pagesize = 0;
	int		retired_pages = 0;

	extern void memscrub_read(caddr_t src, uint_t blks);

	ASSERT(mutex_owned(&memscrub_lock));

	pgsread = 0;
	pa = src;

	if (memscrub_page_retire_span_list != NULL) {
		if (memscrub_page_retire_span_search(src)) {
			/* retired pages in current span */
			scan_mmu_pagesize = 1;
		}
	}

#ifdef MEMSCRUB_DEBUG
	cmn_err(CE_NOTE, "scan_mmu_pagesize = %d\n", scan_mmu_pagesize);
#endif /* MEMSCRUB_DEBUG */

10660Sstevel@tonic-gate 	while (blks != 0) {
10670Sstevel@tonic-gate 		/* Ensure the PA is properly aligned */
10680Sstevel@tonic-gate 		if (((pa & MMU_PAGEMASK4M) == pa) &&
10690Sstevel@tonic-gate 			(blks >= MEMSCRUB_BPP4M)) {
10700Sstevel@tonic-gate 			psz = MMU_PAGESIZE4M;
10710Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP4M;
10720Sstevel@tonic-gate 		} else if (((pa & MMU_PAGEMASK512K) == pa) &&
10730Sstevel@tonic-gate 			(blks >= MEMSCRUB_BPP512K)) {
10740Sstevel@tonic-gate 			psz = MMU_PAGESIZE512K;
10750Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP512K;
10760Sstevel@tonic-gate 		} else if (((pa & MMU_PAGEMASK64K) == pa) &&
10770Sstevel@tonic-gate 			(blks >= MEMSCRUB_BPP64K)) {
10780Sstevel@tonic-gate 			psz = MMU_PAGESIZE64K;
10790Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP64K;
10800Sstevel@tonic-gate 		} else if ((pa & MMU_PAGEMASK) == pa) {
10810Sstevel@tonic-gate 			psz = MMU_PAGESIZE;
10820Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP;
10830Sstevel@tonic-gate 		} else {
10840Sstevel@tonic-gate 			if (memscrub_verbose) {
10850Sstevel@tonic-gate 				cmn_err(CE_NOTE, "Memory scrubber ignoring "
10860Sstevel@tonic-gate 				    "non-page aligned block starting at 0x%"
10870Sstevel@tonic-gate 				    PRIx64, src);
10880Sstevel@tonic-gate 			}
10890Sstevel@tonic-gate 			return;
10900Sstevel@tonic-gate 		}
10910Sstevel@tonic-gate 		if (blks < bpp) bpp = blks;
10920Sstevel@tonic-gate 
10930Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
10940Sstevel@tonic-gate 		cmn_err(CE_NOTE, "Going to run psz=%x, "
10950Sstevel@tonic-gate 		    "bpp=%x pa=%llx\n", psz, bpp, pa);
10960Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
10970Sstevel@tonic-gate 
10980Sstevel@tonic-gate 		/*
10990Sstevel@tonic-gate 		 * MEMSCRUBBASE is a 4MB aligned page in the
11000Sstevel@tonic-gate 		 * kernel so that we can quickly map the PA
11010Sstevel@tonic-gate 		 * to a VA for the block loads performed in
11020Sstevel@tonic-gate 		 * memscrub_read.
11030Sstevel@tonic-gate 		 */
11040Sstevel@tonic-gate 		pfn = mmu_btop(pa);
11050Sstevel@tonic-gate 		va = (caddr_t)MEMSCRUBBASE;
11060Sstevel@tonic-gate 		hat_devload(kas.a_hat, va, psz, pfn, PROT_READ,
11070Sstevel@tonic-gate 			HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
11080Sstevel@tonic-gate 
11090Sstevel@tonic-gate 		/*
11100Sstevel@tonic-gate 		 * Can't allow the memscrubber to migrate across CPUs as
11110Sstevel@tonic-gate 		 * we need to know whether CEEN is enabled for the current
11120Sstevel@tonic-gate 		 * CPU to enable us to scrub the memory. Don't use
11130Sstevel@tonic-gate 		 * kpreempt_disable as the time we take to scan a span (even
11140Sstevel@tonic-gate 		 * without cpu_check_ce having to manually cpu_check_block)
11150Sstevel@tonic-gate 		 * is too long to hold a higher priority thread (eg, RT)
11160Sstevel@tonic-gate 		 * off cpu.
11170Sstevel@tonic-gate 		 */
11180Sstevel@tonic-gate 		thread_affinity_set(curthread, CPU_CURRENT);
11190Sstevel@tonic-gate 
11200Sstevel@tonic-gate 		/*
11210Sstevel@tonic-gate 		 * Protect read scrub from async faults.  For now, we simply
11220Sstevel@tonic-gate 		 * maintain a count of such faults caught.
11230Sstevel@tonic-gate 		 */
11240Sstevel@tonic-gate 
1125*2895Svb70745 		if (!scan_mmu_pagesize && !on_trap(&otd, OT_DATA_EC)) {
11260Sstevel@tonic-gate 			memscrub_read(va, bpp);
11270Sstevel@tonic-gate 			/*
11280Sstevel@tonic-gate 			 * Check if CEs require logging
11290Sstevel@tonic-gate 			 */
11300Sstevel@tonic-gate 			cpu_check_ce(SCRUBBER_CEEN_CHECK,
11310Sstevel@tonic-gate 			    (uint64_t)pa, va, psz);
1132102Srjnoe 			no_trap();
11330Sstevel@tonic-gate 			thread_affinity_clear(curthread);
11340Sstevel@tonic-gate 		} else {
11350Sstevel@tonic-gate 			no_trap();
11360Sstevel@tonic-gate 			thread_affinity_clear(curthread);
11370Sstevel@tonic-gate 
11380Sstevel@tonic-gate 			/*
11390Sstevel@tonic-gate 			 * Got an async error..
11400Sstevel@tonic-gate 			 * Try rescanning it at MMU_PAGESIZE
11410Sstevel@tonic-gate 			 * granularity if we were trying to
11420Sstevel@tonic-gate 			 * read at a larger page size.
11430Sstevel@tonic-gate 			 * This is to ensure we continue to
11440Sstevel@tonic-gate 			 * scan the rest of the span.
1145*2895Svb70745 			 * OR scanning MMU_PAGESIZE granularity to avoid
1146*2895Svb70745 			 * reading retired pages memory when scan_mmu_pagesize
1147*2895Svb70745 			 * is set.
11480Sstevel@tonic-gate 			 */
1149*2895Svb70745 			if (psz > MMU_PAGESIZE || scan_mmu_pagesize) {
11500Sstevel@tonic-gate 			    caddr_t vaddr = va;
11510Sstevel@tonic-gate 			    ms_paddr_t paddr = pa;
11520Sstevel@tonic-gate 			    int tmp = 0;
11530Sstevel@tonic-gate 			    for (; tmp < bpp; tmp += MEMSCRUB_BPP) {
1154*2895Svb70745 				/* Don't scrub retired pages */
1155*2895Svb70745 				if (page_retire_check(paddr, NULL) == 0) {
1156*2895Svb70745 					vaddr += MMU_PAGESIZE;
1157*2895Svb70745 					paddr += MMU_PAGESIZE;
1158*2895Svb70745 					retired_pages++;
1159*2895Svb70745 					continue;
1160*2895Svb70745 				}
11610Sstevel@tonic-gate 				thread_affinity_set(curthread, CPU_CURRENT);
1162102Srjnoe 				if (!on_trap(&otd, OT_DATA_EC)) {
11630Sstevel@tonic-gate 				    memscrub_read(vaddr, MEMSCRUB_BPP);
1164102Srjnoe 				    cpu_check_ce(SCRUBBER_CEEN_CHECK,
1165102Srjnoe 					(uint64_t)paddr, vaddr, MMU_PAGESIZE);
1166102Srjnoe 				    no_trap();
1167102Srjnoe 				} else {
1168102Srjnoe 				    no_trap();
11690Sstevel@tonic-gate 				    memscrub_counts.errors_found.value.ui32++;
1170102Srjnoe 				}
11710Sstevel@tonic-gate 				thread_affinity_clear(curthread);
11720Sstevel@tonic-gate 				vaddr += MMU_PAGESIZE;
11730Sstevel@tonic-gate 				paddr += MMU_PAGESIZE;
11740Sstevel@tonic-gate 			    }
11750Sstevel@tonic-gate 			}
11760Sstevel@tonic-gate 		}
11770Sstevel@tonic-gate 		hat_unload(kas.a_hat, va, psz, HAT_UNLOAD_UNLOCK);
11780Sstevel@tonic-gate 
11790Sstevel@tonic-gate 		blks -= bpp;
11800Sstevel@tonic-gate 		pa += psz;
11810Sstevel@tonic-gate 		pgsread++;
11820Sstevel@tonic-gate 	}
1183*2895Svb70745 
1184*2895Svb70745 	/*
	 * If we just scrubbed this span at MMU_PAGESIZE granularity but
	 * found no retired pages, delete the span from the global list.
1187*2895Svb70745 	 */
1188*2895Svb70745 	if (scan_mmu_pagesize && retired_pages == 0)
1189*2895Svb70745 		memscrub_page_retire_span_delete(src);
1190*2895Svb70745 
1191*2895Svb70745 	/*
	 * We encountered a CE/UE on a retired page while reading the
	 * current span; add the span to the global list so we avoid
	 * reading it again.
1194*2895Svb70745 	 */
1195*2895Svb70745 	if (add_to_page_retire_list) {
1196*2895Svb70745 		if (!memscrub_page_retire_span_search(src))
1197*2895Svb70745 			memscrub_page_retire_span_add(src);
1198*2895Svb70745 		add_to_page_retire_list = 0;
1199*2895Svb70745 	}
1200*2895Svb70745 
12010Sstevel@tonic-gate 	if (memscrub_verbose) {
12020Sstevel@tonic-gate 		cmn_err(CE_NOTE, "Memory scrubber read 0x%x pages starting "
12030Sstevel@tonic-gate 		    "at 0x%" PRIx64, pgsread, src);
12040Sstevel@tonic-gate 	}
12050Sstevel@tonic-gate }
12060Sstevel@tonic-gate 
12070Sstevel@tonic-gate /*
 * Called by cpu_async_log_err() when a memscrub read causes a CE/UE
 * on a retired page.
1210*2895Svb70745  */
1211*2895Svb70745 void
1212*2895Svb70745 memscrub_induced_error(void)
1213*2895Svb70745 {
1214*2895Svb70745 	add_to_page_retire_list = 1;
1215*2895Svb70745 }
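
/*
 * Handshake sketch (grounded in the code above): the fault taken
 * during memscrub_read() is logged asynchronously; when
 * cpu_async_log_err() attributes it to a retired page it calls this
 * function, and memscrub_scan() then notices add_to_page_retire_list
 * at the end of the span and records the span via
 * memscrub_page_retire_span_add().
 */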
1216*2895Svb70745 
1217*2895Svb70745 
1218*2895Svb70745 /*
1219*2895Svb70745  * Called by memscrub_scan().
1220*2895Svb70745  * pa: physical address of span with CE/UE, add to global list.
1221*2895Svb70745  */
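/*
 * For reference, the code below implies a node of roughly this shape;
 * the actual typedef lives earlier in this file:
 *
 *	typedef struct memscrub_page_retire_span {
 *		ms_paddr_t				address;
 *		struct memscrub_page_retire_span	*next;
 *	} memscrub_page_retire_span_t;
 */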
1222*2895Svb70745 static void
1223*2895Svb70745 memscrub_page_retire_span_add(ms_paddr_t pa)
1224*2895Svb70745 {
1225*2895Svb70745 	memscrub_page_retire_span_t *new_span;
1226*2895Svb70745 
	/*
	 * KM_NOSLEEP: allocation failure is tolerable here -- we simply
	 * stop tracking this span (see the message below).
	 */
	new_span = (memscrub_page_retire_span_t *)
	    kmem_zalloc(sizeof (memscrub_page_retire_span_t), KM_NOSLEEP);
1229*2895Svb70745 
1230*2895Svb70745 	if (new_span == NULL) {
1231*2895Svb70745 #ifdef MEMSCRUB_DEBUG
		cmn_err(CE_NOTE, "failed to allocate new span - span with"
		    " retired page(s) not tracked.");
1234*2895Svb70745 #endif /* MEMSCRUB_DEBUG */
1235*2895Svb70745 		return;
1236*2895Svb70745 	}
1237*2895Svb70745 
1238*2895Svb70745 	new_span->address = pa;
1239*2895Svb70745 	new_span->next = memscrub_page_retire_span_list;
1240*2895Svb70745 	memscrub_page_retire_span_list = new_span;
1241*2895Svb70745 }
1242*2895Svb70745 
1243*2895Svb70745 /*
1244*2895Svb70745  * Called by memscrub_scan().
1245*2895Svb70745  * pa: physical address of span to be removed from global list.
1246*2895Svb70745  */
1247*2895Svb70745 static void
1248*2895Svb70745 memscrub_page_retire_span_delete(ms_paddr_t pa)
1249*2895Svb70745 {
1250*2895Svb70745 	memscrub_page_retire_span_t *prev_span, *next_span;
1251*2895Svb70745 
	/* Nothing to do if the list is empty. */
	if (memscrub_page_retire_span_list == NULL)
		return;

	prev_span = memscrub_page_retire_span_list;
	next_span = memscrub_page_retire_span_list->next;
1254*2895Svb70745 
1255*2895Svb70745 	if (pa == prev_span->address) {
1256*2895Svb70745 		memscrub_page_retire_span_list = next_span;
1257*2895Svb70745 		kmem_free(prev_span, sizeof (memscrub_page_retire_span_t));
1258*2895Svb70745 		return;
1259*2895Svb70745 	}
1260*2895Svb70745 
1261*2895Svb70745 	while (next_span) {
1262*2895Svb70745 		if (pa == next_span->address) {
1263*2895Svb70745 			prev_span->next = next_span->next;
1264*2895Svb70745 			kmem_free(next_span,
1265*2895Svb70745 			    sizeof (memscrub_page_retire_span_t));
1266*2895Svb70745 			return;
1267*2895Svb70745 		}
1268*2895Svb70745 		prev_span = next_span;
1269*2895Svb70745 		next_span = next_span->next;
1270*2895Svb70745 	}
1271*2895Svb70745 }
1272*2895Svb70745 
1273*2895Svb70745 /*
1274*2895Svb70745  * Called by memscrub_scan().
1275*2895Svb70745  * pa: physical address of span to be searched in global list.
1276*2895Svb70745  */
1277*2895Svb70745 static int
1278*2895Svb70745 memscrub_page_retire_span_search(ms_paddr_t pa)
1279*2895Svb70745 {
1280*2895Svb70745 	memscrub_page_retire_span_t *next_span = memscrub_page_retire_span_list;
1281*2895Svb70745 
1282*2895Svb70745 	while (next_span) {
1283*2895Svb70745 		if (pa == next_span->address)
1284*2895Svb70745 			return (1);
1285*2895Svb70745 		next_span = next_span->next;
1286*2895Svb70745 	}
1287*2895Svb70745 	return (0);
1288*2895Svb70745 }
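
/*
 * Typical use (sketch): a hit on this list is presumably what drives
 * scan_mmu_pagesize in memscrub_scan(), causing the span to be read
 * at MMU_PAGESIZE granularity so retired pages can be skipped
 * individually.
 */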
1289*2895Svb70745 
1290*2895Svb70745 /*
 * Called from new_memscrub() as a result of a memory delete.
 * Uses page_numtopp_nolock() to determine whether each tracked PA is
 * still valid: PFNs that are no longer installed have no page_t, so
 * page_numtopp_nolock() returns NULL and the span is dropped.
1293*2895Svb70745  */
1294*2895Svb70745 static void
1295*2895Svb70745 memscrub_page_retire_span_list_update(void)
1296*2895Svb70745 {
1297*2895Svb70745 	memscrub_page_retire_span_t *prev, *cur, *next;
1298*2895Svb70745 
1299*2895Svb70745 	if (memscrub_page_retire_span_list == NULL)
1300*2895Svb70745 		return;
1301*2895Svb70745 
1302*2895Svb70745 	prev = cur = memscrub_page_retire_span_list;
1303*2895Svb70745 	next = cur->next;
1304*2895Svb70745 
1305*2895Svb70745 	while (cur) {
1306*2895Svb70745 		if (page_numtopp_nolock(mmu_btop(cur->address)) == NULL) {
1307*2895Svb70745 			if (cur == memscrub_page_retire_span_list) {
1308*2895Svb70745 				memscrub_page_retire_span_list = next;
1309*2895Svb70745 				kmem_free(cur,
1310*2895Svb70745 				    sizeof (memscrub_page_retire_span_t));
1311*2895Svb70745 				prev = cur = memscrub_page_retire_span_list;
1312*2895Svb70745 			} else {
1313*2895Svb70745 				prev->next = cur->next;
1314*2895Svb70745 				kmem_free(cur,
1315*2895Svb70745 				    sizeof (memscrub_page_retire_span_t));
1316*2895Svb70745 				cur = next;
1317*2895Svb70745 			}
1318*2895Svb70745 		} else {
1319*2895Svb70745 			prev = cur;
1320*2895Svb70745 			cur = next;
1321*2895Svb70745 		}
1322*2895Svb70745 		if (cur != NULL)
1323*2895Svb70745 			next = cur->next;
1324*2895Svb70745 	}
1325*2895Svb70745 }
1326*2895Svb70745 
1327*2895Svb70745 /*
13280Sstevel@tonic-gate  * The memory add/delete callback mechanism does not pass in the
13290Sstevel@tonic-gate  * page ranges. The phys_install list has been updated though, so
13300Sstevel@tonic-gate  * create a new scrub list from it.
13310Sstevel@tonic-gate  */
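/*
 * For reference, the phys_install walk below relies on the memlist
 * node shape from <sys/memlist.h>, roughly:
 *
 *	struct memlist {
 *		uint64_t	address;	start of chunk (PA)
 *		uint64_t	size;		size in bytes
 *		struct memlist	*next;
 *		struct memlist	*prev;
 *	};
 */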
13320Sstevel@tonic-gate 
13330Sstevel@tonic-gate static int
1334*2895Svb70745 new_memscrub(int update_page_retire_list)
13350Sstevel@tonic-gate {
13360Sstevel@tonic-gate 	struct memlist *src, *list, *old_list;
13370Sstevel@tonic-gate 	uint_t npgs;
13380Sstevel@tonic-gate 
13390Sstevel@tonic-gate 	/*
13400Sstevel@tonic-gate 	 * copy phys_install to memscrub_memlist
13410Sstevel@tonic-gate 	 */
13420Sstevel@tonic-gate 	list = NULL;
13430Sstevel@tonic-gate 	npgs = 0;
13440Sstevel@tonic-gate 	memlist_read_lock();
13450Sstevel@tonic-gate 	for (src = phys_install; src; src = src->next) {
13460Sstevel@tonic-gate 		if (memscrub_add_span_gen((pfn_t)(src->address >> PAGESHIFT),
13470Sstevel@tonic-gate 		    (pgcnt_t)(src->size >> PAGESHIFT), &list, &npgs)) {
13480Sstevel@tonic-gate 			memlist_read_unlock();
13490Sstevel@tonic-gate 			while (list) {
13500Sstevel@tonic-gate 				struct memlist *el;
13510Sstevel@tonic-gate 
13520Sstevel@tonic-gate 				el = list;
13530Sstevel@tonic-gate 				list = list->next;
13540Sstevel@tonic-gate 				kmem_free(el, sizeof (struct memlist));
13550Sstevel@tonic-gate 			}
13560Sstevel@tonic-gate 			return (-1);
13570Sstevel@tonic-gate 		}
13580Sstevel@tonic-gate 	}
13590Sstevel@tonic-gate 	memlist_read_unlock();
13600Sstevel@tonic-gate 
13610Sstevel@tonic-gate 	mutex_enter(&memscrub_lock);
13620Sstevel@tonic-gate 	memscrub_phys_pages = npgs;
13630Sstevel@tonic-gate 	old_list = memscrub_memlist;
13640Sstevel@tonic-gate 	memscrub_memlist = list;
1365*2895Svb70745 
1366*2895Svb70745 	if (update_page_retire_list)
1367*2895Svb70745 		memscrub_page_retire_span_list_update();
1368*2895Svb70745 
13690Sstevel@tonic-gate 	mutex_exit(&memscrub_lock);
13700Sstevel@tonic-gate 
13710Sstevel@tonic-gate 	while (old_list) {
13720Sstevel@tonic-gate 		struct memlist *el;
13730Sstevel@tonic-gate 
13740Sstevel@tonic-gate 		el = old_list;
13750Sstevel@tonic-gate 		old_list = old_list->next;
13760Sstevel@tonic-gate 		kmem_free(el, sizeof (struct memlist));
13770Sstevel@tonic-gate 	}
1378*2895Svb70745 
13790Sstevel@tonic-gate 	return (0);
13800Sstevel@tonic-gate }
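
/*
 * Note the pattern in new_memscrub() above: the replacement list is
 * built without holding memscrub_lock, swapped in under the lock, and
 * the old list is freed only after the lock is dropped, keeping the
 * critical section short.
 */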
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate /*ARGSUSED*/
13830Sstevel@tonic-gate static void
13840Sstevel@tonic-gate memscrub_mem_config_post_add(
13850Sstevel@tonic-gate 	void *arg,
13860Sstevel@tonic-gate 	pgcnt_t delta_pages)
13870Sstevel@tonic-gate {
13880Sstevel@tonic-gate 	/*
13890Sstevel@tonic-gate 	 * We increment pause_memscrub before entering new_memscrub(). This
13900Sstevel@tonic-gate 	 * will force the memscrubber to sleep, allowing the DR callback
13910Sstevel@tonic-gate 	 * thread to acquire memscrub_lock in new_memscrub(). The use of
13920Sstevel@tonic-gate 	 * atomic_add_32() allows concurrent memory DR operations to use the
13930Sstevel@tonic-gate 	 * callbacks safely.
13940Sstevel@tonic-gate 	 */
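	/*
	 * Sketch of the assumed handshake (the consuming check lives in
	 * the scrubber's main loop elsewhere in this file), e.g.:
	 *
	 *	if (pause_memscrub)
	 *		sleep and skip this scan;
	 *	else
	 *		memscrub_scan(...);
	 */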
13950Sstevel@tonic-gate 	atomic_add_32(&pause_memscrub, 1);
13960Sstevel@tonic-gate 	ASSERT(pause_memscrub != 0);
13970Sstevel@tonic-gate 
	/*
	 * Ignore any failure here: the worst case is simply that the
	 * newly added memory is not scrubbed.
	 */
1401*2895Svb70745 	(void) new_memscrub(0);		/* retain page retire list */
14020Sstevel@tonic-gate 
14030Sstevel@tonic-gate 	/* Restore the pause setting. */
14040Sstevel@tonic-gate 	atomic_add_32(&pause_memscrub, -1);
14050Sstevel@tonic-gate }
14060Sstevel@tonic-gate 
14070Sstevel@tonic-gate /*ARGSUSED*/
14080Sstevel@tonic-gate static int
14090Sstevel@tonic-gate memscrub_mem_config_pre_del(
14100Sstevel@tonic-gate 	void *arg,
14110Sstevel@tonic-gate 	pgcnt_t delta_pages)
14120Sstevel@tonic-gate {
14130Sstevel@tonic-gate 	/* Nothing to do. */
14140Sstevel@tonic-gate 	return (0);
14150Sstevel@tonic-gate }
14160Sstevel@tonic-gate 
14170Sstevel@tonic-gate /*ARGSUSED*/
14180Sstevel@tonic-gate static void
14190Sstevel@tonic-gate memscrub_mem_config_post_del(
14200Sstevel@tonic-gate 	void *arg,
14210Sstevel@tonic-gate 	pgcnt_t delta_pages,
14220Sstevel@tonic-gate 	int cancelled)
14230Sstevel@tonic-gate {
14240Sstevel@tonic-gate 	/*
14250Sstevel@tonic-gate 	 * We increment pause_memscrub before entering new_memscrub(). This
14260Sstevel@tonic-gate 	 * will force the memscrubber to sleep, allowing the DR callback
14270Sstevel@tonic-gate 	 * thread to acquire memscrub_lock in new_memscrub(). The use of
14280Sstevel@tonic-gate 	 * atomic_add_32() allows concurrent memory DR operations to use the
14290Sstevel@tonic-gate 	 * callbacks safely.
14300Sstevel@tonic-gate 	 */
14310Sstevel@tonic-gate 	atomic_add_32(&pause_memscrub, 1);
14320Sstevel@tonic-gate 	ASSERT(pause_memscrub != 0);
14330Sstevel@tonic-gate 
14340Sstevel@tonic-gate 	/*
14350Sstevel@tonic-gate 	 * Must stop scrubbing deleted memory as it may be disconnected.
14360Sstevel@tonic-gate 	 */
1437*2895Svb70745 	if (new_memscrub(1)) {	/* update page retire list */
14380Sstevel@tonic-gate 		disable_memscrub = 1;
14390Sstevel@tonic-gate 	}
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate 	/* Restore the pause setting. */
14420Sstevel@tonic-gate 	atomic_add_32(&pause_memscrub, -1);
14430Sstevel@tonic-gate }
14440Sstevel@tonic-gate 
14450Sstevel@tonic-gate static kphysm_setup_vector_t memscrub_mem_config_vec = {
14460Sstevel@tonic-gate 	KPHYSM_SETUP_VECTOR_VERSION,
14470Sstevel@tonic-gate 	memscrub_mem_config_post_add,
14480Sstevel@tonic-gate 	memscrub_mem_config_pre_del,
14490Sstevel@tonic-gate 	memscrub_mem_config_post_del,
14500Sstevel@tonic-gate };
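
/*
 * Field order follows kphysm_setup_vector_t: interface version, then
 * the post-add, pre-delete, and post-delete callbacks wired into the
 * memory DR framework by the registration below.
 */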
14510Sstevel@tonic-gate 
14520Sstevel@tonic-gate static void
memscrub_init_mem_config(void)
14540Sstevel@tonic-gate {
14550Sstevel@tonic-gate 	int ret;
14560Sstevel@tonic-gate 
14570Sstevel@tonic-gate 	ret = kphysm_setup_func_register(&memscrub_mem_config_vec,
14580Sstevel@tonic-gate 	    (void *)NULL);
14590Sstevel@tonic-gate 	ASSERT(ret == 0);
14600Sstevel@tonic-gate }
14610Sstevel@tonic-gate 
14620Sstevel@tonic-gate static void
memscrub_uninit_mem_config(void)
14640Sstevel@tonic-gate {
14650Sstevel@tonic-gate 	/* This call is OK if the register call was not done. */
14660Sstevel@tonic-gate 	kphysm_setup_func_unregister(&memscrub_mem_config_vec, (void *)NULL);
14670Sstevel@tonic-gate }
1468