/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DR memory support routines.
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/dditypes.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <vm/seg_kmem.h>
#include <sys/lgrp.h>
#include <sys/platform_module.h>

#include <vm/page.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

extern struct memlist	*phys_install;

/* TODO: push this reference below drmach line */
extern int		kcage_on;

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h.
*/ 63*7799SRichard.Bean@Sun.COM static char *dr_ie_fmt = "dr_mem.c %d"; 640Sstevel@tonic-gate 650Sstevel@tonic-gate static int dr_post_detach_mem_unit(dr_mem_unit_t *mp); 660Sstevel@tonic-gate static int dr_reserve_mem_spans(memhandle_t *mhp, 670Sstevel@tonic-gate struct memlist *mlist); 680Sstevel@tonic-gate static int dr_select_mem_target(dr_handle_t *hp, 690Sstevel@tonic-gate dr_mem_unit_t *mp, struct memlist *ml); 700Sstevel@tonic-gate static void dr_init_mem_unit_data(dr_mem_unit_t *mp); 710Sstevel@tonic-gate 720Sstevel@tonic-gate static int memlist_canfit(struct memlist *s_mlist, 730Sstevel@tonic-gate struct memlist *t_mlist); 740Sstevel@tonic-gate 750Sstevel@tonic-gate /* 760Sstevel@tonic-gate * dr_mem_unit_t.sbm_flags 770Sstevel@tonic-gate */ 780Sstevel@tonic-gate #define DR_MFLAG_RESERVED 0x01 /* mem unit reserved for delete */ 790Sstevel@tonic-gate #define DR_MFLAG_SOURCE 0x02 /* source brd of copy/rename op */ 800Sstevel@tonic-gate #define DR_MFLAG_TARGET 0x04 /* target brd of copy/rename op */ 810Sstevel@tonic-gate #define DR_MFLAG_MEMUPSIZE 0x08 /* move from big to small board */ 820Sstevel@tonic-gate #define DR_MFLAG_MEMDOWNSIZE 0x10 /* move from small to big board */ 830Sstevel@tonic-gate #define DR_MFLAG_MEMRESIZE 0x18 /* move to different size board */ 840Sstevel@tonic-gate #define DR_MFLAG_RELOWNER 0x20 /* memory release (delete) owner */ 850Sstevel@tonic-gate #define DR_MFLAG_RELDONE 0x40 /* memory release (delete) done */ 860Sstevel@tonic-gate 870Sstevel@tonic-gate /* helper macros */ 880Sstevel@tonic-gate #define _ptob64(p) ((uint64_t)(p) << PAGESHIFT) 890Sstevel@tonic-gate #define _b64top(b) ((pgcnt_t)((b) >> PAGESHIFT)) 900Sstevel@tonic-gate 910Sstevel@tonic-gate static struct memlist * 920Sstevel@tonic-gate dr_get_memlist(dr_mem_unit_t *mp) 930Sstevel@tonic-gate { 940Sstevel@tonic-gate struct memlist *mlist = NULL; 950Sstevel@tonic-gate sbd_error_t *err; 960Sstevel@tonic-gate static fn_t f = "dr_get_memlist"; 970Sstevel@tonic-gate 
980Sstevel@tonic-gate PR_MEM("%s for %s...\n", f, mp->sbm_cm.sbdev_path); 990Sstevel@tonic-gate 1000Sstevel@tonic-gate /* 1010Sstevel@tonic-gate * Return cached memlist, if present. 1020Sstevel@tonic-gate * This memlist will be present following an 1030Sstevel@tonic-gate * unconfigure (a.k.a: detach) of this memunit. 1040Sstevel@tonic-gate * It should only be used in the case were a configure 1050Sstevel@tonic-gate * is bringing this memunit back in without going 1060Sstevel@tonic-gate * through the disconnect and connect states. 1070Sstevel@tonic-gate */ 1080Sstevel@tonic-gate if (mp->sbm_mlist) { 1090Sstevel@tonic-gate PR_MEM("%s: found cached memlist\n", f); 1100Sstevel@tonic-gate 1110Sstevel@tonic-gate mlist = memlist_dup(mp->sbm_mlist); 1120Sstevel@tonic-gate } else { 1130Sstevel@tonic-gate uint64_t basepa = _ptob64(mp->sbm_basepfn); 1140Sstevel@tonic-gate 1150Sstevel@tonic-gate /* attempt to construct a memlist using phys_install */ 1160Sstevel@tonic-gate 1170Sstevel@tonic-gate /* round down to slice base address */ 1180Sstevel@tonic-gate basepa &= ~(mp->sbm_slice_size - 1); 1190Sstevel@tonic-gate 1200Sstevel@tonic-gate /* get a copy of phys_install to edit */ 1210Sstevel@tonic-gate memlist_read_lock(); 1220Sstevel@tonic-gate mlist = memlist_dup(phys_install); 1230Sstevel@tonic-gate memlist_read_unlock(); 1240Sstevel@tonic-gate 1250Sstevel@tonic-gate /* trim lower irrelevant span */ 1260Sstevel@tonic-gate if (mlist) 1270Sstevel@tonic-gate mlist = memlist_del_span(mlist, 0ull, basepa); 1280Sstevel@tonic-gate 1290Sstevel@tonic-gate /* trim upper irrelevant span */ 1300Sstevel@tonic-gate if (mlist) { 1310Sstevel@tonic-gate uint64_t endpa; 1320Sstevel@tonic-gate 1330Sstevel@tonic-gate basepa += mp->sbm_slice_size; 1340Sstevel@tonic-gate endpa = _ptob64(physmax + 1); 1350Sstevel@tonic-gate if (endpa > basepa) 1360Sstevel@tonic-gate mlist = memlist_del_span( 1370Sstevel@tonic-gate mlist, 1380Sstevel@tonic-gate basepa, 1390Sstevel@tonic-gate endpa - basepa); 
1400Sstevel@tonic-gate } 1410Sstevel@tonic-gate 1420Sstevel@tonic-gate if (mlist) { 1430Sstevel@tonic-gate /* successfully built a memlist */ 1440Sstevel@tonic-gate PR_MEM("%s: derived memlist from phys_install\n", f); 1450Sstevel@tonic-gate } 1460Sstevel@tonic-gate 1470Sstevel@tonic-gate /* if no mlist yet, try platform layer */ 1480Sstevel@tonic-gate if (!mlist) { 1490Sstevel@tonic-gate err = drmach_mem_get_memlist( 1500Sstevel@tonic-gate mp->sbm_cm.sbdev_id, &mlist); 1510Sstevel@tonic-gate if (err) { 1520Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 1530Sstevel@tonic-gate mlist = NULL; /* paranoia */ 1540Sstevel@tonic-gate } 1550Sstevel@tonic-gate } 1560Sstevel@tonic-gate } 1570Sstevel@tonic-gate 1580Sstevel@tonic-gate PR_MEM("%s: memlist for %s\n", f, mp->sbm_cm.sbdev_path); 1590Sstevel@tonic-gate PR_MEMLIST_DUMP(mlist); 1600Sstevel@tonic-gate 1610Sstevel@tonic-gate return (mlist); 1620Sstevel@tonic-gate } 1630Sstevel@tonic-gate 1640Sstevel@tonic-gate typedef struct { 1650Sstevel@tonic-gate kcondvar_t cond; 1660Sstevel@tonic-gate kmutex_t lock; 1670Sstevel@tonic-gate int error; 1680Sstevel@tonic-gate int done; 1690Sstevel@tonic-gate } dr_release_mem_sync_t; 1700Sstevel@tonic-gate 1710Sstevel@tonic-gate /* 1720Sstevel@tonic-gate * Memory has been logically removed by the time this routine is called. 
1730Sstevel@tonic-gate */ 1740Sstevel@tonic-gate static void 1750Sstevel@tonic-gate dr_mem_del_done(void *arg, int error) 1760Sstevel@tonic-gate { 1770Sstevel@tonic-gate dr_release_mem_sync_t *ds = arg; 1780Sstevel@tonic-gate 1790Sstevel@tonic-gate mutex_enter(&ds->lock); 1800Sstevel@tonic-gate ds->error = error; 1810Sstevel@tonic-gate ds->done = 1; 1820Sstevel@tonic-gate cv_signal(&ds->cond); 1830Sstevel@tonic-gate mutex_exit(&ds->lock); 1840Sstevel@tonic-gate } 1850Sstevel@tonic-gate 1860Sstevel@tonic-gate /* 1870Sstevel@tonic-gate * When we reach here the memory being drained should have 1880Sstevel@tonic-gate * already been reserved in dr_pre_release_mem(). 1890Sstevel@tonic-gate * Our only task here is to kick off the "drain" and wait 1900Sstevel@tonic-gate * for it to finish. 1910Sstevel@tonic-gate */ 1920Sstevel@tonic-gate void 1930Sstevel@tonic-gate dr_release_mem(dr_common_unit_t *cp) 1940Sstevel@tonic-gate { 1950Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)cp; 1960Sstevel@tonic-gate int err; 1970Sstevel@tonic-gate dr_release_mem_sync_t rms; 1980Sstevel@tonic-gate static fn_t f = "dr_release_mem"; 1990Sstevel@tonic-gate 2000Sstevel@tonic-gate /* check that this memory unit has been reserved */ 2010Sstevel@tonic-gate if (!(mp->sbm_flags & DR_MFLAG_RELOWNER)) { 2020Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 2030Sstevel@tonic-gate return; 2040Sstevel@tonic-gate } 2050Sstevel@tonic-gate 2060Sstevel@tonic-gate bzero((void *) &rms, sizeof (rms)); 2070Sstevel@tonic-gate 2080Sstevel@tonic-gate mutex_init(&rms.lock, NULL, MUTEX_DRIVER, NULL); 2090Sstevel@tonic-gate cv_init(&rms.cond, NULL, CV_DRIVER, NULL); 2100Sstevel@tonic-gate 2110Sstevel@tonic-gate mutex_enter(&rms.lock); 2120Sstevel@tonic-gate err = kphysm_del_start(mp->sbm_memhandle, 2130Sstevel@tonic-gate dr_mem_del_done, (void *) &rms); 2140Sstevel@tonic-gate if (err == KPHYSM_OK) { 2150Sstevel@tonic-gate /* wait for completion or interrupt */ 2160Sstevel@tonic-gate while (!rms.done) 
{ 2170Sstevel@tonic-gate if (cv_wait_sig(&rms.cond, &rms.lock) == 0) { 2180Sstevel@tonic-gate /* then there is a pending UNIX signal */ 2190Sstevel@tonic-gate (void) kphysm_del_cancel(mp->sbm_memhandle); 2200Sstevel@tonic-gate 2210Sstevel@tonic-gate /* wait for completion */ 2220Sstevel@tonic-gate while (!rms.done) 2230Sstevel@tonic-gate cv_wait(&rms.cond, &rms.lock); 2240Sstevel@tonic-gate } 2250Sstevel@tonic-gate } 2260Sstevel@tonic-gate /* get the result of the memory delete operation */ 2270Sstevel@tonic-gate err = rms.error; 2280Sstevel@tonic-gate } 2290Sstevel@tonic-gate mutex_exit(&rms.lock); 2300Sstevel@tonic-gate 2310Sstevel@tonic-gate cv_destroy(&rms.cond); 2320Sstevel@tonic-gate mutex_destroy(&rms.lock); 2330Sstevel@tonic-gate 2340Sstevel@tonic-gate if (err != KPHYSM_OK) { 2350Sstevel@tonic-gate int e_code; 2360Sstevel@tonic-gate 2370Sstevel@tonic-gate switch (err) { 2380Sstevel@tonic-gate case KPHYSM_ENOWORK: 2390Sstevel@tonic-gate e_code = ESBD_NOERROR; 2400Sstevel@tonic-gate break; 2410Sstevel@tonic-gate 2420Sstevel@tonic-gate case KPHYSM_EHANDLE: 2430Sstevel@tonic-gate case KPHYSM_ESEQUENCE: 2440Sstevel@tonic-gate e_code = ESBD_INTERNAL; 2450Sstevel@tonic-gate break; 2460Sstevel@tonic-gate 2470Sstevel@tonic-gate case KPHYSM_ENOTVIABLE: 2480Sstevel@tonic-gate e_code = ESBD_MEM_NOTVIABLE; 2490Sstevel@tonic-gate break; 2500Sstevel@tonic-gate 2510Sstevel@tonic-gate case KPHYSM_EREFUSED: 2520Sstevel@tonic-gate e_code = ESBD_MEM_REFUSED; 2530Sstevel@tonic-gate break; 2540Sstevel@tonic-gate 2550Sstevel@tonic-gate case KPHYSM_ENONRELOC: 2560Sstevel@tonic-gate e_code = ESBD_MEM_NONRELOC; 2570Sstevel@tonic-gate break; 2580Sstevel@tonic-gate 2590Sstevel@tonic-gate case KPHYSM_ECANCELLED: 2600Sstevel@tonic-gate e_code = ESBD_MEM_CANCELLED; 2610Sstevel@tonic-gate break; 2620Sstevel@tonic-gate 2630Sstevel@tonic-gate case KPHYSM_ERESOURCE: 2640Sstevel@tonic-gate e_code = ESBD_MEMFAIL; 2650Sstevel@tonic-gate break; 2660Sstevel@tonic-gate 2670Sstevel@tonic-gate 
default: 2680Sstevel@tonic-gate cmn_err(CE_WARN, 2690Sstevel@tonic-gate "%s: unexpected kphysm error code %d," 2700Sstevel@tonic-gate " id 0x%p", 2710Sstevel@tonic-gate f, err, mp->sbm_cm.sbdev_id); 2720Sstevel@tonic-gate 2730Sstevel@tonic-gate e_code = ESBD_IO; 2740Sstevel@tonic-gate break; 2750Sstevel@tonic-gate } 2760Sstevel@tonic-gate 2770Sstevel@tonic-gate if (e_code != ESBD_NOERROR) { 2780Sstevel@tonic-gate dr_dev_err(CE_IGNORE, &mp->sbm_cm, e_code); 2790Sstevel@tonic-gate } 2800Sstevel@tonic-gate } 2810Sstevel@tonic-gate } 2820Sstevel@tonic-gate 2830Sstevel@tonic-gate void 2840Sstevel@tonic-gate dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp) 2850Sstevel@tonic-gate { 2860Sstevel@tonic-gate _NOTE(ARGUNUSED(hp)) 2870Sstevel@tonic-gate 2880Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)cp; 2890Sstevel@tonic-gate struct memlist *ml, *mc; 2900Sstevel@tonic-gate sbd_error_t *err; 2910Sstevel@tonic-gate static fn_t f = "dr_attach_mem"; 2920Sstevel@tonic-gate 2930Sstevel@tonic-gate PR_MEM("%s...\n", f); 2940Sstevel@tonic-gate 2950Sstevel@tonic-gate dr_lock_status(hp->h_bd); 2960Sstevel@tonic-gate err = drmach_configure(cp->sbdev_id, 0); 2970Sstevel@tonic-gate dr_unlock_status(hp->h_bd); 2980Sstevel@tonic-gate if (err) { 2990Sstevel@tonic-gate DRERR_SET_C(&cp->sbdev_error, &err); 3000Sstevel@tonic-gate return; 3010Sstevel@tonic-gate } 3020Sstevel@tonic-gate 3030Sstevel@tonic-gate ml = dr_get_memlist(mp); 3040Sstevel@tonic-gate for (mc = ml; mc; mc = mc->next) { 3050Sstevel@tonic-gate int rv; 3060Sstevel@tonic-gate sbd_error_t *err; 3070Sstevel@tonic-gate 3080Sstevel@tonic-gate rv = kphysm_add_memory_dynamic( 3090Sstevel@tonic-gate (pfn_t)(mc->address >> PAGESHIFT), 3100Sstevel@tonic-gate (pgcnt_t)(mc->size >> PAGESHIFT)); 3110Sstevel@tonic-gate if (rv != KPHYSM_OK) { 3120Sstevel@tonic-gate /* 3130Sstevel@tonic-gate * translate kphysm error and 3140Sstevel@tonic-gate * store in devlist error 3150Sstevel@tonic-gate */ 3160Sstevel@tonic-gate switch (rv) { 
3170Sstevel@tonic-gate case KPHYSM_ERESOURCE: 3180Sstevel@tonic-gate rv = ESBD_NOMEM; 3190Sstevel@tonic-gate break; 3200Sstevel@tonic-gate 3210Sstevel@tonic-gate case KPHYSM_EFAULT: 3220Sstevel@tonic-gate rv = ESBD_FAULT; 3230Sstevel@tonic-gate break; 3240Sstevel@tonic-gate 3250Sstevel@tonic-gate default: 3260Sstevel@tonic-gate rv = ESBD_INTERNAL; 3270Sstevel@tonic-gate break; 3280Sstevel@tonic-gate } 3290Sstevel@tonic-gate 3300Sstevel@tonic-gate if (rv == ESBD_INTERNAL) { 3310Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 3320Sstevel@tonic-gate } else 3330Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, rv); 3340Sstevel@tonic-gate break; 3350Sstevel@tonic-gate } 3360Sstevel@tonic-gate 3370Sstevel@tonic-gate err = drmach_mem_add_span( 3380Sstevel@tonic-gate mp->sbm_cm.sbdev_id, mc->address, mc->size); 3390Sstevel@tonic-gate if (err) { 3400Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 3410Sstevel@tonic-gate break; 3420Sstevel@tonic-gate } 3430Sstevel@tonic-gate } 3440Sstevel@tonic-gate 3450Sstevel@tonic-gate memlist_delete(ml); 3460Sstevel@tonic-gate 3470Sstevel@tonic-gate /* back out if configure failed */ 3480Sstevel@tonic-gate if (mp->sbm_cm.sbdev_error != NULL) { 3490Sstevel@tonic-gate dr_lock_status(hp->h_bd); 3501772Sjl139090 err = drmach_unconfigure(cp->sbdev_id, 3511772Sjl139090 DEVI_BRANCH_DESTROY); 3520Sstevel@tonic-gate if (err) 3530Sstevel@tonic-gate sbd_err_clear(&err); 3540Sstevel@tonic-gate dr_unlock_status(hp->h_bd); 3550Sstevel@tonic-gate } 3560Sstevel@tonic-gate } 3570Sstevel@tonic-gate 3580Sstevel@tonic-gate #define DR_SCRUB_VALUE 0x0d0e0a0d0b0e0e0fULL 3590Sstevel@tonic-gate 3600Sstevel@tonic-gate static void 3610Sstevel@tonic-gate dr_mem_ecache_scrub(dr_mem_unit_t *mp, struct memlist *mlist) 3620Sstevel@tonic-gate { 3630Sstevel@tonic-gate #ifdef DEBUG 3640Sstevel@tonic-gate clock_t stime = lbolt; 3650Sstevel@tonic-gate #endif /* DEBUG */ 3660Sstevel@tonic-gate 3670Sstevel@tonic-gate struct memlist *ml; 
3680Sstevel@tonic-gate uint64_t scrub_value = DR_SCRUB_VALUE; 3690Sstevel@tonic-gate processorid_t cpuid; 3700Sstevel@tonic-gate static fn_t f = "dr_mem_ecache_scrub"; 3710Sstevel@tonic-gate 3720Sstevel@tonic-gate cpuid = drmach_mem_cpu_affinity(mp->sbm_cm.sbdev_id); 3730Sstevel@tonic-gate affinity_set(cpuid); 3740Sstevel@tonic-gate 3750Sstevel@tonic-gate PR_MEM("%s: using proc %d, memlist...\n", f, 3760Sstevel@tonic-gate (cpuid == CPU_CURRENT) ? CPU->cpu_id : cpuid); 3770Sstevel@tonic-gate PR_MEMLIST_DUMP(mlist); 3780Sstevel@tonic-gate 3790Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) { 3800Sstevel@tonic-gate uint64_t dst_pa; 3810Sstevel@tonic-gate uint64_t nbytes; 3820Sstevel@tonic-gate 3830Sstevel@tonic-gate /* calculate the destination physical address */ 3840Sstevel@tonic-gate dst_pa = ml->address; 3850Sstevel@tonic-gate if (ml->address & PAGEOFFSET) 3860Sstevel@tonic-gate cmn_err(CE_WARN, 387930Smathue "%s: address (0x%lx) not on " 3880Sstevel@tonic-gate "page boundary", f, ml->address); 3890Sstevel@tonic-gate 3900Sstevel@tonic-gate nbytes = ml->size; 3910Sstevel@tonic-gate if (ml->size & PAGEOFFSET) 3920Sstevel@tonic-gate cmn_err(CE_WARN, 393930Smathue "%s: size (0x%lx) not on " 3940Sstevel@tonic-gate "page boundary", f, ml->size); 3950Sstevel@tonic-gate 3960Sstevel@tonic-gate /*LINTED*/ 3970Sstevel@tonic-gate while (nbytes > 0) { 3980Sstevel@tonic-gate /* write 64 bits to dst_pa */ 3990Sstevel@tonic-gate stdphys(dst_pa, scrub_value); 4000Sstevel@tonic-gate 4010Sstevel@tonic-gate /* increment/decrement by cacheline sizes */ 4020Sstevel@tonic-gate dst_pa += DRMACH_COHERENCY_UNIT; 4030Sstevel@tonic-gate nbytes -= DRMACH_COHERENCY_UNIT; 4040Sstevel@tonic-gate } 4050Sstevel@tonic-gate } 4060Sstevel@tonic-gate 4070Sstevel@tonic-gate /* 4080Sstevel@tonic-gate * flush this cpu's ecache and take care to ensure 4090Sstevel@tonic-gate * that all of it's bus transactions have retired. 
4100Sstevel@tonic-gate */ 4110Sstevel@tonic-gate drmach_cpu_flush_ecache_sync(); 4120Sstevel@tonic-gate 4130Sstevel@tonic-gate affinity_clear(); 4140Sstevel@tonic-gate 4150Sstevel@tonic-gate #ifdef DEBUG 4160Sstevel@tonic-gate stime = lbolt - stime; 4170Sstevel@tonic-gate PR_MEM("%s: scrub ticks = %ld (%ld secs)\n", f, stime, stime / hz); 4180Sstevel@tonic-gate #endif /* DEBUG */ 4190Sstevel@tonic-gate } 4200Sstevel@tonic-gate 4210Sstevel@tonic-gate static int 4220Sstevel@tonic-gate dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp) 4230Sstevel@tonic-gate { 4240Sstevel@tonic-gate time_t copytime; 4250Sstevel@tonic-gate drmachid_t cr_id; 4260Sstevel@tonic-gate dr_sr_handle_t *srhp; 427917Selowe struct memlist *c_ml, *d_ml; 4280Sstevel@tonic-gate sbd_error_t *err; 4290Sstevel@tonic-gate static fn_t f = "dr_move_memory"; 4300Sstevel@tonic-gate 4310Sstevel@tonic-gate PR_MEM("%s: (INLINE) moving memory from %s to %s\n", 4320Sstevel@tonic-gate f, 4330Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 4340Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 4350Sstevel@tonic-gate 4360Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_SOURCE); 4370Sstevel@tonic-gate ASSERT(s_mp->sbm_peer == t_mp); 4380Sstevel@tonic-gate ASSERT(s_mp->sbm_mlist); 4390Sstevel@tonic-gate 4400Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET); 4410Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 4420Sstevel@tonic-gate 4430Sstevel@tonic-gate /* 4440Sstevel@tonic-gate * create a memlist of spans to copy by removing 4450Sstevel@tonic-gate * the spans that have been deleted, if any, from 4460Sstevel@tonic-gate * the full source board memlist. s_mp->sbm_del_mlist 4470Sstevel@tonic-gate * will be NULL if there were no spans deleted from 4480Sstevel@tonic-gate * the source board. 
4490Sstevel@tonic-gate */ 4500Sstevel@tonic-gate c_ml = memlist_dup(s_mp->sbm_mlist); 4510Sstevel@tonic-gate d_ml = s_mp->sbm_del_mlist; 4520Sstevel@tonic-gate while (d_ml != NULL) { 4530Sstevel@tonic-gate c_ml = memlist_del_span(c_ml, d_ml->address, d_ml->size); 4540Sstevel@tonic-gate d_ml = d_ml->next; 4550Sstevel@tonic-gate } 4560Sstevel@tonic-gate 4570Sstevel@tonic-gate affinity_set(drmach_mem_cpu_affinity(t_mp->sbm_cm.sbdev_id)); 4580Sstevel@tonic-gate 4590Sstevel@tonic-gate err = drmach_copy_rename_init( 4600Sstevel@tonic-gate t_mp->sbm_cm.sbdev_id, _ptob64(t_mp->sbm_slice_offset), 4610Sstevel@tonic-gate s_mp->sbm_cm.sbdev_id, c_ml, &cr_id); 4620Sstevel@tonic-gate if (err) { 4630Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 4640Sstevel@tonic-gate affinity_clear(); 4650Sstevel@tonic-gate return (-1); 4660Sstevel@tonic-gate } 4670Sstevel@tonic-gate 4680Sstevel@tonic-gate srhp = dr_get_sr_handle(hp); 4690Sstevel@tonic-gate ASSERT(srhp); 4700Sstevel@tonic-gate 4710Sstevel@tonic-gate copytime = lbolt; 4720Sstevel@tonic-gate 4730Sstevel@tonic-gate /* Quiesce the OS. */ 4740Sstevel@tonic-gate if (dr_suspend(srhp)) { 4750Sstevel@tonic-gate cmn_err(CE_WARN, "%s: failed to quiesce OS" 4760Sstevel@tonic-gate " for copy-rename", f); 4770Sstevel@tonic-gate 4780Sstevel@tonic-gate dr_release_sr_handle(srhp); 4790Sstevel@tonic-gate err = drmach_copy_rename_fini(cr_id); 4800Sstevel@tonic-gate if (err) { 4810Sstevel@tonic-gate /* 4820Sstevel@tonic-gate * no error is expected since the program has 4830Sstevel@tonic-gate * not yet run. 
4840Sstevel@tonic-gate */ 4850Sstevel@tonic-gate 4860Sstevel@tonic-gate /* catch this in debug kernels */ 4870Sstevel@tonic-gate ASSERT(0); 4880Sstevel@tonic-gate 4890Sstevel@tonic-gate sbd_err_clear(&err); 4900Sstevel@tonic-gate } 4910Sstevel@tonic-gate 4920Sstevel@tonic-gate /* suspend error reached via hp */ 4930Sstevel@tonic-gate s_mp->sbm_cm.sbdev_error = hp->h_err; 4940Sstevel@tonic-gate hp->h_err = NULL; 4950Sstevel@tonic-gate 4960Sstevel@tonic-gate affinity_clear(); 4970Sstevel@tonic-gate return (-1); 4980Sstevel@tonic-gate } 4990Sstevel@tonic-gate 5000Sstevel@tonic-gate /* 5010Sstevel@tonic-gate * Rename memory for lgroup. 5020Sstevel@tonic-gate * Source and target board numbers are packaged in arg. 5030Sstevel@tonic-gate */ 5040Sstevel@tonic-gate { 5050Sstevel@tonic-gate dr_board_t *t_bp, *s_bp; 5060Sstevel@tonic-gate 5070Sstevel@tonic-gate s_bp = s_mp->sbm_cm.sbdev_bp; 5080Sstevel@tonic-gate t_bp = t_mp->sbm_cm.sbdev_bp; 5090Sstevel@tonic-gate 5100Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_RENAME, 5110Sstevel@tonic-gate (uintptr_t)(s_bp->b_num | (t_bp->b_num << 16))); 5120Sstevel@tonic-gate } 5130Sstevel@tonic-gate 5140Sstevel@tonic-gate drmach_copy_rename(cr_id); 5150Sstevel@tonic-gate 5160Sstevel@tonic-gate /* Resume the OS. 
*/ 5170Sstevel@tonic-gate dr_resume(srhp); 5180Sstevel@tonic-gate 5190Sstevel@tonic-gate copytime = lbolt - copytime; 5200Sstevel@tonic-gate 5210Sstevel@tonic-gate dr_release_sr_handle(srhp); 5220Sstevel@tonic-gate err = drmach_copy_rename_fini(cr_id); 5230Sstevel@tonic-gate if (err) 5240Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 5250Sstevel@tonic-gate 5260Sstevel@tonic-gate affinity_clear(); 5270Sstevel@tonic-gate 5280Sstevel@tonic-gate PR_MEM("%s: copy-rename elapsed time = %ld ticks (%ld secs)\n", 5290Sstevel@tonic-gate f, copytime, copytime / hz); 5300Sstevel@tonic-gate 5310Sstevel@tonic-gate /* return -1 if dr_suspend or copy/rename recorded an error */ 5320Sstevel@tonic-gate return (err == NULL ? 0 : -1); 5330Sstevel@tonic-gate } 5340Sstevel@tonic-gate 5350Sstevel@tonic-gate /* 5360Sstevel@tonic-gate * If detaching node contains memory that is "non-permanent" 5370Sstevel@tonic-gate * then the memory adr's are simply cleared. If the memory 5380Sstevel@tonic-gate * is non-relocatable, then do a copy-rename. 
5390Sstevel@tonic-gate */ 5400Sstevel@tonic-gate void 5410Sstevel@tonic-gate dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp) 5420Sstevel@tonic-gate { 5430Sstevel@tonic-gate int rv = 0; 5440Sstevel@tonic-gate dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp; 5450Sstevel@tonic-gate dr_mem_unit_t *t_mp; 5460Sstevel@tonic-gate dr_state_t state; 5470Sstevel@tonic-gate static fn_t f = "dr_detach_mem"; 5480Sstevel@tonic-gate 5490Sstevel@tonic-gate PR_MEM("%s...\n", f); 5500Sstevel@tonic-gate 5510Sstevel@tonic-gate /* lookup target mem unit and target board structure, if any */ 5520Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 5530Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 5540Sstevel@tonic-gate ASSERT(t_mp != NULL); 5550Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 5560Sstevel@tonic-gate } else { 5570Sstevel@tonic-gate t_mp = NULL; 5580Sstevel@tonic-gate } 5590Sstevel@tonic-gate 5600Sstevel@tonic-gate /* verify mem unit's state is UNREFERENCED */ 5610Sstevel@tonic-gate state = s_mp->sbm_cm.sbdev_state; 5620Sstevel@tonic-gate if (state != DR_STATE_UNREFERENCED) { 5630Sstevel@tonic-gate dr_dev_err(CE_IGNORE, &s_mp->sbm_cm, ESBD_STATE); 5640Sstevel@tonic-gate return; 5650Sstevel@tonic-gate } 5660Sstevel@tonic-gate 5670Sstevel@tonic-gate /* verify target mem unit's state is UNREFERENCED, if any */ 5680Sstevel@tonic-gate if (t_mp != NULL) { 5690Sstevel@tonic-gate state = t_mp->sbm_cm.sbdev_state; 5700Sstevel@tonic-gate if (state != DR_STATE_UNREFERENCED) { 5710Sstevel@tonic-gate dr_dev_err(CE_IGNORE, &t_mp->sbm_cm, ESBD_STATE); 5720Sstevel@tonic-gate return; 5730Sstevel@tonic-gate } 5740Sstevel@tonic-gate } 5750Sstevel@tonic-gate 5760Sstevel@tonic-gate /* 5770Sstevel@tonic-gate * Scrub deleted memory. This will cause all cachelines 5780Sstevel@tonic-gate * referencing the memory to only be in the local cpu's 5790Sstevel@tonic-gate * ecache. 
5800Sstevel@tonic-gate */ 5810Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_RELDONE) { 5820Sstevel@tonic-gate /* no del mlist for src<=dst mem size copy/rename */ 5830Sstevel@tonic-gate if (s_mp->sbm_del_mlist) 5840Sstevel@tonic-gate dr_mem_ecache_scrub(s_mp, s_mp->sbm_del_mlist); 5850Sstevel@tonic-gate } 5860Sstevel@tonic-gate if (t_mp != NULL && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) { 5870Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist); 5880Sstevel@tonic-gate dr_mem_ecache_scrub(t_mp, t_mp->sbm_del_mlist); 5890Sstevel@tonic-gate } 5900Sstevel@tonic-gate 5910Sstevel@tonic-gate /* 5920Sstevel@tonic-gate * If there is no target board (no copy/rename was needed), then 5930Sstevel@tonic-gate * we're done! 5940Sstevel@tonic-gate */ 5950Sstevel@tonic-gate if (t_mp == NULL) { 5960Sstevel@tonic-gate sbd_error_t *err; 5970Sstevel@tonic-gate /* 5980Sstevel@tonic-gate * Reprogram interconnect hardware and disable 5990Sstevel@tonic-gate * memory controllers for memory node that's going away. 6000Sstevel@tonic-gate */ 6010Sstevel@tonic-gate 6020Sstevel@tonic-gate err = drmach_mem_disable(s_mp->sbm_cm.sbdev_id); 6030Sstevel@tonic-gate if (err) { 6040Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 6050Sstevel@tonic-gate rv = -1; 6060Sstevel@tonic-gate } 6070Sstevel@tonic-gate } else { 6080Sstevel@tonic-gate rv = dr_move_memory(hp, s_mp, t_mp); 6090Sstevel@tonic-gate PR_MEM("%s: %s memory COPY-RENAME (board %d -> %d)\n", 6100Sstevel@tonic-gate f, 6110Sstevel@tonic-gate rv ? 
"FAILED" : "COMPLETED", 6120Sstevel@tonic-gate s_mp->sbm_cm.sbdev_bp->b_num, 6130Sstevel@tonic-gate t_mp->sbm_cm.sbdev_bp->b_num); 6140Sstevel@tonic-gate 6150Sstevel@tonic-gate if (rv != 0) 6160Sstevel@tonic-gate (void) dr_cancel_mem(s_mp); 6170Sstevel@tonic-gate } 6180Sstevel@tonic-gate 6190Sstevel@tonic-gate if (rv == 0) { 6200Sstevel@tonic-gate sbd_error_t *err; 6210Sstevel@tonic-gate 6220Sstevel@tonic-gate dr_lock_status(hp->h_bd); 6230Sstevel@tonic-gate err = drmach_unconfigure(s_mp->sbm_cm.sbdev_id, 6241772Sjl139090 DEVI_BRANCH_DESTROY); 6250Sstevel@tonic-gate dr_unlock_status(hp->h_bd); 6260Sstevel@tonic-gate if (err) 6270Sstevel@tonic-gate sbd_err_clear(&err); 6280Sstevel@tonic-gate } 6290Sstevel@tonic-gate } 6300Sstevel@tonic-gate 6310Sstevel@tonic-gate #ifndef _STARFIRE 6320Sstevel@tonic-gate /* 6330Sstevel@tonic-gate * XXX workaround for certain lab configurations (see also starcat drmach.c) 6340Sstevel@tonic-gate * Temporary code to get around observed incorrect results from 6350Sstevel@tonic-gate * kphysm_del_span_query when the queried span contains address spans 6360Sstevel@tonic-gate * not occupied by memory in between spans that do have memory. 6370Sstevel@tonic-gate * This routine acts as a wrapper to kphysm_del_span_query. It builds 6380Sstevel@tonic-gate * a memlist from phys_install of spans that exist between base and 6390Sstevel@tonic-gate * base + npages, inclusively. Kphysm_del_span_query is called for each 6400Sstevel@tonic-gate * node in the memlist with the results accumulated in *mp. 
 */
/*
 * Wrapper around kphysm_del_span_query() that restricts the query to
 * the memory slice containing `base'.  A duplicate of phys_install is
 * taken and every span outside the slice is pruned before the spans
 * are queried one at a time; the per-span results are accumulated
 * into *mp.  Returns the first non-OK value from
 * kphysm_del_span_query(), or its last return value.
 *
 * NOTE(review): the mask constant 137438953472 is 1ULL << 37 (128 GB);
 * presumably the fixed slice size on this platform — confirm against
 * the platform's drmach definitions.
 *
 * NOTE(review): if the pruned mlist ends up empty, the second loop
 * never executes and `rv' is returned uninitialized — confirm callers
 * can never present a base outside phys_install.
 */
static int
dr_del_span_query(pfn_t base, pgcnt_t npages, memquery_t *mp)
{
	uint64_t	pa = _ptob64(base);
	uint64_t	sm = ~ (137438953472ull - 1);
	uint64_t	sa = pa & sm;
	struct memlist	*mlist, *ml;
	int		rv;

	npages = npages; /* silence lint */
	memlist_read_lock();
	mlist = memlist_dup(phys_install);
	memlist_read_unlock();

	/*
	 * Drop every phys_install span that lies outside the slice
	 * containing `base'.  memlist_del_span() may reallocate the
	 * list, so restart the scan after each deletion.
	 */
again:
	for (ml = mlist; ml; ml = ml->next) {
		if ((ml->address & sm) != sa) {
			mlist = memlist_del_span(mlist,
			    ml->address, ml->size);
			goto again;
		}
	}

	mp->phys_pages = 0;
	mp->managed = 0;
	mp->nonrelocatable = 0;
	mp->first_nonrelocatable = (pfn_t)-1;	/* XXX */
	mp->last_nonrelocatable = 0;

	/* accumulate the per-span query results into *mp */
	for (ml = mlist; ml; ml = ml->next) {
		memquery_t mq;

		rv = kphysm_del_span_query(
		    _b64top(ml->address), _b64top(ml->size), &mq);
		if (rv)
			break;

		mp->phys_pages += mq.phys_pages;
		mp->managed += mq.managed;
		mp->nonrelocatable += mq.nonrelocatable;

		if (mq.nonrelocatable != 0) {
			if (mq.first_nonrelocatable < mp->first_nonrelocatable)
				mp->first_nonrelocatable =
				    mq.first_nonrelocatable;
			if (mq.last_nonrelocatable > mp->last_nonrelocatable)
				mp->last_nonrelocatable =
				    mq.last_nonrelocatable;
		}
	}

	if (mp->nonrelocatable == 0)
		mp->first_nonrelocatable = 0;	/* XXX */

	memlist_delete(mlist);
	return (rv);
}

/*
 * On this build variant all later kphysm_del_span_query() calls in
 * this file go through the slice-restricted wrapper above.
 */
#define	kphysm_del_span_query	dr_del_span_query
#endif /* _STARFIRE */

/*
 * NOTE: This routine is only partially smart about multiple
 *	 mem-units. Need to make mem-status structure smart
 *	 about them also.
 */
/*
 * Fill in one sbd_mem_stat_t per present, initialized memory unit on
 * the handle's board.  For each unit the platform status, page
 * counts, cage state, copy-rename peer identity and span-query
 * results (managed/non-relocatable pages) are reported.  Returns the
 * number of status entries written (dsp is advanced per entry).
 */
int
dr_mem_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		m, mix;
	memdelstat_t	mdst;
	memquery_t	mq;
	dr_board_t	*bp;
	dr_mem_unit_t	*mp;
	sbd_mem_stat_t	*msp;
	static fn_t	f = "dr_mem_status";

	bp = hp->h_bd;
	devset &= DR_DEVS_PRESENT(bp);

	for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
		int		rv;
		sbd_error_t	*err;
		drmach_status_t	pstat;
		dr_mem_unit_t	*p_mp;

		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
			continue;

		mp = dr_get_mem_unit(bp, m);

		if (mp->sbm_cm.sbdev_state == DR_STATE_EMPTY) {
			/* present, but not fully initialized */
			continue;
		}

		if (mp->sbm_cm.sbdev_id == (drmachid_t)0)
			continue;

		/* fetch platform status */
		err = drmach_status(mp->sbm_cm.sbdev_id, &pstat);
		if (err) {
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
			continue;
		}

		msp = &dsp->d_mem;
		bzero((caddr_t)msp, sizeof (*msp));

		/*
		 * NOTE(review): strncpy() does not guarantee NUL
		 * termination when pstat.type fills c_name exactly —
		 * presumably type strings are known to be shorter;
		 * confirm.
		 */
		strncpy(msp->ms_cm.c_id.c_name, pstat.type,
		    sizeof (msp->ms_cm.c_id.c_name));
		msp->ms_cm.c_id.c_type = mp->sbm_cm.sbdev_type;
		msp->ms_cm.c_id.c_unit = SBD_NULL_UNIT;
		msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
		msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy | pstat.busy;
		msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;
		msp->ms_cm.c_ostate = mp->sbm_cm.sbdev_ostate;

		msp->ms_totpages = mp->sbm_npages;
		msp->ms_basepfn = mp->sbm_basepfn;
		msp->ms_pageslost = mp->sbm_pageslost;
		msp->ms_cage_enabled = kcage_on;

		/* the peer is only meaningful during a copy-rename */
		if (mp->sbm_flags & DR_MFLAG_RESERVED)
			p_mp = mp->sbm_peer;
		else
			p_mp = NULL;

		if (p_mp == NULL) {
			msp->ms_peer_is_target = 0;
			msp->ms_peer_ap_id[0] = '\0';
		} else if (p_mp->sbm_flags & DR_MFLAG_RESERVED) {
			char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			char *minor;

			/*
			 * b_dip doesn't have to be held for ddi_pathname()
			 * because the board struct (dr_board_t) will be
			 * destroyed before b_dip detaches.
			 */
			(void) ddi_pathname(bp->b_dip, path);
			minor = strchr(p_mp->sbm_cm.sbdev_path, ':');

			/* peer attachment-point id = board path + minor */
			snprintf(msp->ms_peer_ap_id,
			    sizeof (msp->ms_peer_ap_id), "%s%s",
			    path, (minor == NULL) ? "" : minor);

			kmem_free(path, MAXPATHLEN);

			if (p_mp->sbm_flags & DR_MFLAG_TARGET)
				msp->ms_peer_is_target = 1;
		}

		if (mp->sbm_flags & DR_MFLAG_RELOWNER)
			rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
		else
			rv = KPHYSM_EHANDLE;	/* force 'if' to fail */

		if (rv == KPHYSM_OK) {
			/*
			 * Any pages above managed is "free",
			 * i.e. it's collected.
			 */
			msp->ms_detpages += (uint_t)(mdst.collected +
			    mdst.phys_pages - mdst.managed);
		} else {
			/*
			 * If we're UNREFERENCED or UNCONFIGURED,
			 * then the number of detached pages is
			 * however many pages are on the board.
			 * I.e. detached = not in use by OS.
			 */
			switch (msp->ms_cm.c_ostate) {
			/*
			 * changed to use cfgadm states
			 *
			 * was:
			 *	case DR_STATE_UNREFERENCED:
			 *	case DR_STATE_UNCONFIGURED:
			 */
			case SBD_STAT_UNCONFIGURED:
				msp->ms_detpages = msp->ms_totpages;
				break;

			default:
				break;
			}
		}

		/*
		 * kphysm_del_span_query can report non-reloc pages = total
		 * pages for memory that is not yet configured
		 */
		if (mp->sbm_cm.sbdev_state != DR_STATE_UNCONFIGURED) {

			rv = kphysm_del_span_query(mp->sbm_basepfn,
			    mp->sbm_npages, &mq);

			if (rv == KPHYSM_OK) {
				msp->ms_managed_pages = mq.managed;
				msp->ms_noreloc_pages = mq.nonrelocatable;
				msp->ms_noreloc_first =
				    mq.first_nonrelocatable;
				msp->ms_noreloc_last =
				    mq.last_nonrelocatable;
				msp->ms_cm.c_sflags = 0;
				/*
				 * non-relocatable pages mean an
				 * unconfigure would require a quiesce
				 */
				if (mq.nonrelocatable) {
					SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
					    msp->ms_cm.c_sflags);
				}
			} else {
				PR_MEM("%s: kphysm_del_span_query() = %d\n",
				    f, rv);
			}
		}

		/*
		 * Check source unit state during copy-rename
		 */
		if ((mp->sbm_flags & DR_MFLAG_SOURCE) &&
		    (mp->sbm_cm.sbdev_state == DR_STATE_UNREFERENCED ||
		    mp->sbm_cm.sbdev_state == DR_STATE_RELEASE))
			msp->ms_cm.c_ostate = SBD_STAT_CONFIGURED;

		mix++;
		dsp++;
	}

	return (mix);
}

/*
 * Pre-attach hook: for each memory unit in devlist, re-enable the
 * memory hardware via drmach_mem_enable().  Units recovering from a
 * prior unconfigure must still carry the memlist cached by
 * dr_post_detach_mem_unit().  Stops at the first error.  Returns 0 on
 * success, -1 if any unit failed (error recorded on that unit).
 */
int
dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		err_flag = 0;
	int		d;
	sbd_error_t	*err;
	static fn_t	f = "dr_pre_attach_mem";

	PR_MEM("%s...\n", f);

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
		dr_state_t	state;

		cmn_err(CE_CONT, "OS configure %s", mp->sbm_cm.sbdev_path);

		state = mp->sbm_cm.sbdev_state;
		switch (state) {
		case DR_STATE_UNCONFIGURED:
			PR_MEM("%s: recovering from UNCONFIG for %s\n",
			    f,
			    mp->sbm_cm.sbdev_path);

			/* use memlist cached by dr_post_detach_mem_unit */
			ASSERT(mp->sbm_mlist != NULL);
			PR_MEM("%s: re-configuring cached memlist for %s:\n",
			    f, mp->sbm_cm.sbdev_path);
			PR_MEMLIST_DUMP(mp->sbm_mlist);

			/* kphysm del handle should be have been freed */
			ASSERT((mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);

			/*FALLTHROUGH*/

		case DR_STATE_CONNECTED:
			PR_MEM("%s: reprogramming mem hardware on %s\n",
			    f, mp->sbm_cm.sbdev_bp->b_path);

			PR_MEM("%s: enabling %s\n",
			    f, mp->sbm_cm.sbdev_path);

			err = drmach_mem_enable(mp->sbm_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
				err_flag = 1;
			}
			break;

		default:
			/* any other state is invalid for attach */
			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_STATE);
			err_flag = 1;
			break;
		}

		/* exit for loop if error encountered */
		if (err_flag)
			break;
	}

	return (err_flag ? -1 : 0);
}

/*
 * Post-attach hook: verify each unit's memory now appears in
 * phys_install, register its spans with the platform via
 * drmach_mem_add_span(), discard any memlist cached across a prior
 * unconfigure, and refresh the unit data.  Per-unit failures are
 * recorded on the unit; always returns 0.
 */
int
dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d;
	static fn_t	f = "dr_post_attach_mem";

	PR_MEM("%s...\n", f);

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
		struct memlist	*mlist, *ml;

		mlist = dr_get_memlist(mp);
		if (mlist == NULL) {
			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_MEMFAIL);
			continue;
		}

		/*
		 * Verify the memory really did successfully attach
		 * by checking for its existence in phys_install.
		 */
		memlist_read_lock();
		if (memlist_intersect(phys_install, mlist) == 0) {
			memlist_read_unlock();

			DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);

			PR_MEM("%s: %s memlist not in phys_install",
			    f, mp->sbm_cm.sbdev_path);

			memlist_delete(mlist);
			continue;
		}
		memlist_read_unlock();

		/* inform the platform layer of each attached span */
		for (ml = mlist; ml != NULL; ml = ml->next) {
			sbd_error_t *err;

			err = drmach_mem_add_span(
			    mp->sbm_cm.sbdev_id,
			    ml->address,
			    ml->size);
			if (err)
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
		}

		memlist_delete(mlist);

		/*
		 * Destroy cached memlist, if any.
		 * There will be a cached memlist in sbm_mlist if
		 * this board is being configured directly after
		 * an unconfigure.
		 * To support this transition, dr_post_detach_mem
		 * left a copy of the last known memlist in sbm_mlist.
		 * This memlist could differ from any derived from
		 * hardware if while this memunit was last configured
		 * the system detected and deleted bad pages from
		 * phys_install. The location of those bad pages
		 * will be reflected in the cached memlist.
		 */
		if (mp->sbm_mlist) {
			memlist_delete(mp->sbm_mlist);
			mp->sbm_mlist = NULL;
		}

		/*
		 * TODO: why is this call to dr_init_mem_unit_data here?
		 * this has been done at discovery or connect time, so this is
		 * probably redundant and unnecessary.
		 */
		dr_init_mem_unit_data(mp);
	}

	return (0);
}

/*
 * Pre-detach hook: announce the upcoming unconfigure of each memory
 * unit on the console.  No state is changed; always returns 0.
 */
int
dr_pre_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d;

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];

		cmn_err(CE_CONT, "OS unconfigure %s", mp->sbm_cm.sbdev_path);
	}

	return (0);
}


/*
 * Post-detach hook: run dr_post_detach_mem_unit() on each unit of the
 * handle's board.  Returns 0 if every unit cleaned up, -1 if any
 * failed.
 */
int
dr_post_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d, rv;
	static fn_t	f = "dr_post_detach_mem";

	PR_MEM("%s...\n", f);

	rv = 0;
	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];

		ASSERT(mp->sbm_cm.sbdev_bp == hp->h_bd);

		if (dr_post_detach_mem_unit(mp))
			rv = -1;
	}

	return (rv);
}

/*
 * Add every span in `ml' back to the running system: hand the pages
 * to the VM system with kphysm_add_memory_dynamic() and register the
 * span with the platform via drmach_mem_add_span().  Failures are
 * logged (and, for drmach, recorded on the unit) but do not stop the
 * walk over the remaining spans.
 */
static void
dr_add_memory_spans(dr_mem_unit_t *mp, struct memlist *ml)
{
	static fn_t	f = "dr_add_memory_spans";

	PR_MEM("%s...", f);
	PR_MEMLIST_DUMP(ml);

#ifdef DEBUG
	/* sanity: these spans should not already be in phys_install */
	memlist_read_lock();
	if (memlist_intersect(phys_install, ml)) {
		PR_MEM("%s:WARNING: memlist intersects with phys_install\n", f);
	}
	memlist_read_unlock();
#endif

	for (; ml; ml = ml->next) {
		pfn_t		base;
		pgcnt_t		npgs;
		int		rv;
		sbd_error_t	*err;

		base = _b64top(ml->address);
		npgs = _b64top(ml->size);

		rv = kphysm_add_memory_dynamic(base, npgs);

		err = drmach_mem_add_span(
		    mp->sbm_cm.sbdev_id,
		    ml->address,
		    ml->size);

		if (err)
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);

		if (rv != KPHYSM_OK) {
			cmn_err(CE_WARN, "%s:"
			    " unexpected kphysm_add_memory_dynamic"
			    " return value %d;"
			    " basepfn=0x%lx, npages=%ld\n",
			    f, rv, base, npgs);

			continue;
		}
	}
}

/*
 * Finish detaching one (source) memory unit after the release phase,
 * including the aftermath of a copy-rename when s_mp has a TARGET
 * peer:
 *
 *  1. verify the deleted spans are really gone from phys_install;
 *  2. if either unit recorded an error, roll both back to
 *     CONFIGURED and bail to cleanup;
 *  3. rename the source/target memlists to their post-copy-rename
 *     base PAs, move dynamic-segment bookkeeping to the target, add
 *     back any excess target memory, and trim the target if its new
 *     base pushes it past the usable slice;
 *  4. delete the stale address spans from the platform layer;
 *  5. reset both units' software state (the source keeps its memlist
 *     cached for a later re-attach — see the NOTE below).
 *
 * Returns 0 on success, -1 when an error had been recorded on either
 * unit.
 */
static int
dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
{
	uint64_t	sz = s_mp->sbm_slice_size;
	uint64_t	sm = sz - 1;
	/* old and new below refer to PAs before and after copy-rename */
	uint64_t	s_old_basepa, s_new_basepa;
	uint64_t	t_old_basepa, t_new_basepa;
	uint64_t	t_new_smallsize = 0;
	dr_mem_unit_t	*t_mp, *x_mp;
	struct memlist	*ml;
	int		rv;
	sbd_error_t	*err;
	static fn_t	f = "dr_post_detach_mem_unit";

	PR_MEM("%s...\n", f);

	/* s_mp->sbm_del_mlist could be NULL, meaning no deleted spans */
	PR_MEM("%s: %s: deleted memlist (EMPTY maybe okay):\n",
	    f, s_mp->sbm_cm.sbdev_path);
	PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);

	/* sanity check */
	ASSERT(s_mp->sbm_del_mlist == NULL ||
	    (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);

	if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
		t_mp = s_mp->sbm_peer;
		ASSERT(t_mp != NULL);
		ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
		ASSERT(t_mp->sbm_peer == s_mp);

		ASSERT(t_mp->sbm_flags & DR_MFLAG_RELDONE);
		ASSERT(t_mp->sbm_del_mlist);

		PR_MEM("%s: target %s: deleted memlist:\n",
		    f, t_mp->sbm_cm.sbdev_path);
		PR_MEMLIST_DUMP(t_mp->sbm_del_mlist);
	} else {
		/* this is no target unit */
		t_mp = NULL;
	}

	/*
	 * Verify the memory really did successfully detach
	 * by checking for its non-existence in phys_install.
	 */
	rv = 0;
	memlist_read_lock();
	if (s_mp->sbm_flags & DR_MFLAG_RELDONE) {
		x_mp = s_mp;
		rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
	}
	if (rv == 0 && t_mp && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) {
		x_mp = t_mp;
		rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
	}
	memlist_read_unlock();

	if (rv) {
		/* error: memlist still in phys_install */
		DR_DEV_INTERNAL_ERROR(&x_mp->sbm_cm);
	}

	/*
	 * clean mem unit state and bail out if an error has been recorded.
	 */
	rv = 0;
	if (s_mp->sbm_cm.sbdev_error) {
		PR_MEM("%s: %s flags=%x", f,
		    s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
		DR_DEV_CLR_UNREFERENCED(&s_mp->sbm_cm);
		DR_DEV_CLR_RELEASED(&s_mp->sbm_cm);
		dr_device_transition(&s_mp->sbm_cm, DR_STATE_CONFIGURED);
		rv = -1;
	}
	if (t_mp != NULL && t_mp->sbm_cm.sbdev_error != NULL) {
		PR_MEM("%s: %s flags=%x", f,
		    s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
		DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
		DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
		dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED);
		rv = -1;
	}
	if (rv)
		goto cleanup;

	s_old_basepa = _ptob64(s_mp->sbm_basepfn);
	err = drmach_mem_get_base_physaddr(s_mp->sbm_cm.sbdev_id,
	    &s_new_basepa);
	ASSERT(err == NULL);

	PR_MEM("%s:s_old_basepa: 0x%lx\n", f, s_old_basepa);
	PR_MEM("%s:s_new_basepa: 0x%lx\n", f, s_new_basepa);

	if (t_mp != NULL) {
		struct memlist *s_copy_mlist;

		t_old_basepa = _ptob64(t_mp->sbm_basepfn);
		err = drmach_mem_get_base_physaddr(t_mp->sbm_cm.sbdev_id,
		    &t_new_basepa);
		ASSERT(err == NULL);

		PR_MEM("%s:t_old_basepa: 0x%lx\n", f, t_old_basepa);
		PR_MEM("%s:t_new_basepa: 0x%lx\n", f, t_new_basepa);

		/*
		 * Construct copy list with original source addresses.
		 * Used to add back excess target mem.
		 */
		s_copy_mlist = memlist_dup(s_mp->sbm_mlist);
		for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) {
			s_copy_mlist = memlist_del_span(s_copy_mlist,
			    ml->address, ml->size);
		}

		PR_MEM("%s: source copy list:\n:", f);
		PR_MEMLIST_DUMP(s_copy_mlist);

		/*
		 * We had to swap mem-units, so update
		 * memlists accordingly with new base
		 * addresses.
		 */
		for (ml = t_mp->sbm_mlist; ml; ml = ml->next) {
			ml->address -= t_old_basepa;
			ml->address += t_new_basepa;
		}

		/*
		 * There is no need to explicitly rename the target delete
		 * memlist, because sbm_del_mlist and sbm_mlist always
		 * point to the same memlist for a copy/rename operation.
		 */
		ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);

		PR_MEM("%s: renamed target memlist and delete memlist:\n", f);
		PR_MEMLIST_DUMP(t_mp->sbm_mlist);

		for (ml = s_mp->sbm_mlist; ml; ml = ml->next) {
			ml->address -= s_old_basepa;
			ml->address += s_new_basepa;
		}

		PR_MEM("%s: renamed source memlist:\n", f);
		PR_MEMLIST_DUMP(s_mp->sbm_mlist);

		/*
		 * Keep track of dynamically added segments
		 * since they cannot be split if we need to delete
		 * excess source memory later for this board.
		 */
		if (t_mp->sbm_dyn_segs)
			memlist_delete(t_mp->sbm_dyn_segs);
		t_mp->sbm_dyn_segs = s_mp->sbm_dyn_segs;
		s_mp->sbm_dyn_segs = NULL;

		/*
		 * If the target memory range with the new target base PA
		 * extends beyond the usable slice, prevent any "target excess"
		 * from being added back after this copy/rename and
		 * calculate the new smaller size of the target board
		 * to be set as part of target cleanup. The base + npages
		 * must only include the range of memory up to the end of
		 * this slice. This will only be used after a category 4
		 * large-to-small target type copy/rename - see comments
		 * in dr_select_mem_target.
		 */
		if (((t_new_basepa & sm) + _ptob64(t_mp->sbm_npages)) > sz) {
			t_new_smallsize = sz - (t_new_basepa & sm);
		}

		if (s_mp->sbm_flags & DR_MFLAG_MEMRESIZE &&
		    t_new_smallsize == 0) {
			struct memlist	*t_excess_mlist;

			/*
			 * Add back excess target memory.
			 * Subtract out the portion of the target memory
			 * node that was taken over by the source memory
			 * node.
			 */
			t_excess_mlist = memlist_dup(t_mp->sbm_mlist);
			for (ml = s_copy_mlist; ml; ml = ml->next) {
				t_excess_mlist =
				    memlist_del_span(t_excess_mlist,
				    ml->address, ml->size);
			}

			/*
			 * Update dynamically added segs
			 */
			for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) {
				t_mp->sbm_dyn_segs =
				    memlist_del_span(t_mp->sbm_dyn_segs,
				    ml->address, ml->size);
			}
			for (ml = t_excess_mlist; ml; ml = ml->next) {
				t_mp->sbm_dyn_segs =
				    memlist_cat_span(t_mp->sbm_dyn_segs,
				    ml->address, ml->size);
			}
			PR_MEM("%s: %s: updated dynamic seg list:\n",
			    f, t_mp->sbm_cm.sbdev_path);
			PR_MEMLIST_DUMP(t_mp->sbm_dyn_segs);

			PR_MEM("%s: adding back remaining portion"
			    " of %s, memlist:\n",
			    f, t_mp->sbm_cm.sbdev_path);
			PR_MEMLIST_DUMP(t_excess_mlist);

			dr_add_memory_spans(s_mp, t_excess_mlist);
			memlist_delete(t_excess_mlist);
		}
		memlist_delete(s_copy_mlist);

#ifdef DEBUG
		/*
		 * Renaming s_mp->sbm_del_mlist is not necessary. This
		 * list is not used beyond this point, and in fact, is
		 * disposed of at the end of this function.
		 */
		for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) {
			ml->address -= s_old_basepa;
			ml->address += s_new_basepa;
		}

		PR_MEM("%s: renamed source delete memlist", f);
		PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);
#endif

	}

	if (t_mp != NULL) {
		/* delete target's entire address space */
		err = drmach_mem_del_span(
		    t_mp->sbm_cm.sbdev_id, t_old_basepa & ~ sm, sz);
		if (err)
			DRERR_SET_C(&t_mp->sbm_cm.sbdev_error, &err);
		ASSERT(err == NULL);

		/*
		 * After the copy/rename, the original address space
		 * for the source board (which is now located on the
		 * target board) may now have some excess to be deleted.
		 * The amount is calculated by masking the slice
		 * info and keeping the slice offset from t_new_basepa.
		 */
		err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
		    s_old_basepa & ~ sm, t_new_basepa & sm);
		if (err)
			DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
		ASSERT(err == NULL);

	} else {
		/* delete board's entire address space */
		err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
		    s_old_basepa & ~ sm, sz);
		if (err)
			DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
		ASSERT(err == NULL);
	}

cleanup:
	/* clean up target mem unit */
	if (t_mp != NULL) {
		memlist_delete(t_mp->sbm_del_mlist);
		/* no need to delete sbm_mlist, it shares sbm_del_mlist */

		t_mp->sbm_del_mlist = NULL;
		t_mp->sbm_mlist = NULL;
		t_mp->sbm_peer = NULL;
		t_mp->sbm_flags = 0;
		t_mp->sbm_cm.sbdev_busy = 0;
		dr_init_mem_unit_data(t_mp);

		/* reduce target size if new PAs go past end of usable slice */
		if (t_new_smallsize > 0) {
			t_mp->sbm_npages = _b64top(t_new_smallsize);
			PR_MEM("%s: target new size 0x%lx bytes\n",
			    f, t_new_smallsize);
		}
	}
	if (t_mp != NULL && t_mp->sbm_cm.sbdev_error == NULL) {
		/*
		 * now that copy/rename has completed, undo this
		 * work that was done in dr_release_mem_done.
		 */
		DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
		DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
		dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED);
	}

	/*
	 * clean up (source) board's mem unit structure.
	 * NOTE: sbm_mlist is retained if no error has been record (in other
	 * words, when s_mp->sbm_cm.sbdev_error is NULL). This memlist is
	 * referred to elsewhere as the cached memlist. The cached memlist
	 * is used to re-attach (configure back in) this memunit from the
	 * unconfigured state. The memlist is retained because it may
	 * represent bad pages that were detected while the memory was
	 * configured into the OS. The OS deletes bad pages from phys_install.
	 * Those deletes, if any, will be represented in the cached mlist.
	 */
	if (s_mp->sbm_del_mlist && s_mp->sbm_del_mlist != s_mp->sbm_mlist)
		memlist_delete(s_mp->sbm_del_mlist);

	if (s_mp->sbm_cm.sbdev_error && s_mp->sbm_mlist) {
		memlist_delete(s_mp->sbm_mlist);
		s_mp->sbm_mlist = NULL;
	}

	if (s_mp->sbm_dyn_segs != NULL && s_mp->sbm_cm.sbdev_error == 0) {
		memlist_delete(s_mp->sbm_dyn_segs);
		s_mp->sbm_dyn_segs = NULL;
	}

	s_mp->sbm_del_mlist = NULL;
	s_mp->sbm_peer = NULL;
	s_mp->sbm_flags = 0;
	s_mp->sbm_cm.sbdev_busy = 0;
	dr_init_mem_unit_data(s_mp);

	PR_MEM("%s: cached memlist for %s:", f, s_mp->sbm_cm.sbdev_path);
	PR_MEMLIST_DUMP(s_mp->sbm_mlist);

	return (0);
}

/*
 * Successful return from this function will have the memory
 * handle in bp->b_dev[..mem-unit...].sbm_memhandle allocated
 * and waiting. This routine's job is to select the memory that
 * actually has to be released (detached) which may not necessarily
 * be the same memory node that came in in devlist[],
 * i.e. a copy-rename is needed.
14400Sstevel@tonic-gate */ 14410Sstevel@tonic-gate int 14420Sstevel@tonic-gate dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum) 14430Sstevel@tonic-gate { 14440Sstevel@tonic-gate int d; 14450Sstevel@tonic-gate int err_flag = 0; 14460Sstevel@tonic-gate static fn_t f = "dr_pre_release_mem"; 14470Sstevel@tonic-gate 14480Sstevel@tonic-gate PR_MEM("%s...\n", f); 14490Sstevel@tonic-gate 14500Sstevel@tonic-gate for (d = 0; d < devnum; d++) { 14510Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d]; 14520Sstevel@tonic-gate int rv; 14530Sstevel@tonic-gate memquery_t mq; 14540Sstevel@tonic-gate struct memlist *ml; 14550Sstevel@tonic-gate 14560Sstevel@tonic-gate if (mp->sbm_cm.sbdev_error) { 14570Sstevel@tonic-gate err_flag = 1; 14580Sstevel@tonic-gate continue; 14590Sstevel@tonic-gate } else if (!kcage_on) { 14600Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_KCAGE_OFF); 14610Sstevel@tonic-gate err_flag = 1; 14620Sstevel@tonic-gate continue; 14630Sstevel@tonic-gate } 14640Sstevel@tonic-gate 14650Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_RESERVED) { 14660Sstevel@tonic-gate /* 14670Sstevel@tonic-gate * Board is currently involved in a delete 14680Sstevel@tonic-gate * memory operation. Can't detach this guy until 14690Sstevel@tonic-gate * that operation completes. 14700Sstevel@tonic-gate */ 14710Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_INVAL); 14720Sstevel@tonic-gate err_flag = 1; 14730Sstevel@tonic-gate break; 14740Sstevel@tonic-gate } 14750Sstevel@tonic-gate 14760Sstevel@tonic-gate /* 14770Sstevel@tonic-gate * Check whether the detaching memory requires a 14780Sstevel@tonic-gate * copy-rename. 
14790Sstevel@tonic-gate */ 14800Sstevel@tonic-gate ASSERT(mp->sbm_npages != 0); 14810Sstevel@tonic-gate rv = kphysm_del_span_query( 14820Sstevel@tonic-gate mp->sbm_basepfn, mp->sbm_npages, &mq); 14830Sstevel@tonic-gate if (rv != KPHYSM_OK) { 14840Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 14850Sstevel@tonic-gate err_flag = 1; 14860Sstevel@tonic-gate break; 14870Sstevel@tonic-gate } 14880Sstevel@tonic-gate 14890Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 14900Sstevel@tonic-gate if (!(dr_cmd_flags(hp) & 14910Sstevel@tonic-gate (SBD_FLAG_FORCE | SBD_FLAG_QUIESCE_OKAY))) { 14920Sstevel@tonic-gate /* caller wasn't prompted for a suspend */ 14930Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, 14940Sstevel@tonic-gate ESBD_QUIESCE_REQD); 14950Sstevel@tonic-gate err_flag = 1; 14960Sstevel@tonic-gate break; 14970Sstevel@tonic-gate } 14980Sstevel@tonic-gate } 14990Sstevel@tonic-gate 15000Sstevel@tonic-gate /* flags should be clean at this time */ 15010Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 15020Sstevel@tonic-gate 15030Sstevel@tonic-gate ASSERT(mp->sbm_mlist == NULL); /* should be null */ 15040Sstevel@tonic-gate ASSERT(mp->sbm_del_mlist == NULL); /* should be null */ 15050Sstevel@tonic-gate if (mp->sbm_mlist != NULL) { 15060Sstevel@tonic-gate memlist_delete(mp->sbm_mlist); 15070Sstevel@tonic-gate mp->sbm_mlist = NULL; 15080Sstevel@tonic-gate } 15090Sstevel@tonic-gate 15100Sstevel@tonic-gate ml = dr_get_memlist(mp); 15110Sstevel@tonic-gate if (ml == NULL) { 15120Sstevel@tonic-gate err_flag = 1; 15130Sstevel@tonic-gate PR_MEM("%s: no memlist found for %s\n", 15140Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 15150Sstevel@tonic-gate continue; 15160Sstevel@tonic-gate } 15170Sstevel@tonic-gate 15180Sstevel@tonic-gate /* allocate a kphysm handle */ 15190Sstevel@tonic-gate rv = kphysm_del_gethandle(&mp->sbm_memhandle); 15200Sstevel@tonic-gate if (rv != KPHYSM_OK) { 15210Sstevel@tonic-gate memlist_delete(ml); 15220Sstevel@tonic-gate 15230Sstevel@tonic-gate 
DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 15240Sstevel@tonic-gate err_flag = 1; 15250Sstevel@tonic-gate break; 15260Sstevel@tonic-gate } 15270Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RELOWNER; 15280Sstevel@tonic-gate 15290Sstevel@tonic-gate if ((mq.nonrelocatable != 0) || 15300Sstevel@tonic-gate dr_reserve_mem_spans(&mp->sbm_memhandle, ml)) { 15310Sstevel@tonic-gate /* 15320Sstevel@tonic-gate * Either the detaching memory node contains 15330Sstevel@tonic-gate * non-reloc memory or we failed to reserve the 15340Sstevel@tonic-gate * detaching memory node (which did _not_ have 15350Sstevel@tonic-gate * any non-reloc memory, i.e. some non-reloc mem 15360Sstevel@tonic-gate * got onboard). 15370Sstevel@tonic-gate */ 15380Sstevel@tonic-gate 15390Sstevel@tonic-gate if (dr_select_mem_target(hp, mp, ml)) { 15400Sstevel@tonic-gate int rv; 15410Sstevel@tonic-gate 15420Sstevel@tonic-gate /* 15430Sstevel@tonic-gate * We had no luck locating a target 15440Sstevel@tonic-gate * memory node to be the recipient of 15450Sstevel@tonic-gate * the non-reloc memory on the node 15460Sstevel@tonic-gate * we're trying to detach. 15470Sstevel@tonic-gate * Clean up be disposing the mem handle 15480Sstevel@tonic-gate * and the mem list. 
15490Sstevel@tonic-gate */ 15500Sstevel@tonic-gate rv = kphysm_del_release(mp->sbm_memhandle); 15510Sstevel@tonic-gate if (rv != KPHYSM_OK) { 15520Sstevel@tonic-gate /* 15530Sstevel@tonic-gate * can do nothing but complain 15540Sstevel@tonic-gate * and hope helpful for debug 15550Sstevel@tonic-gate */ 15560Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unexpected" 15570Sstevel@tonic-gate " kphysm_del_release return" 15580Sstevel@tonic-gate " value %d", 15590Sstevel@tonic-gate f, rv); 15600Sstevel@tonic-gate } 15610Sstevel@tonic-gate mp->sbm_flags &= ~DR_MFLAG_RELOWNER; 15620Sstevel@tonic-gate 15630Sstevel@tonic-gate memlist_delete(ml); 15640Sstevel@tonic-gate 15650Sstevel@tonic-gate /* make sure sbm_flags is clean */ 15660Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 15670Sstevel@tonic-gate 15680Sstevel@tonic-gate dr_dev_err(CE_WARN, 15690Sstevel@tonic-gate &mp->sbm_cm, ESBD_NO_TARGET); 15700Sstevel@tonic-gate 15710Sstevel@tonic-gate err_flag = 1; 15720Sstevel@tonic-gate break; 15730Sstevel@tonic-gate } 15740Sstevel@tonic-gate 15750Sstevel@tonic-gate /* 15760Sstevel@tonic-gate * ml is not memlist_delete'd here because 15770Sstevel@tonic-gate * it has been assigned to mp->sbm_mlist 15780Sstevel@tonic-gate * by dr_select_mem_target. 
15790Sstevel@tonic-gate */ 15800Sstevel@tonic-gate } else { 15810Sstevel@tonic-gate /* no target needed to detach this board */ 15820Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RESERVED; 15830Sstevel@tonic-gate mp->sbm_peer = NULL; 15840Sstevel@tonic-gate mp->sbm_del_mlist = ml; 15850Sstevel@tonic-gate mp->sbm_mlist = ml; 15860Sstevel@tonic-gate mp->sbm_cm.sbdev_busy = 1; 15870Sstevel@tonic-gate } 15880Sstevel@tonic-gate #ifdef DEBUG 15890Sstevel@tonic-gate ASSERT(mp->sbm_mlist != NULL); 15900Sstevel@tonic-gate 15910Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_SOURCE) { 15920Sstevel@tonic-gate PR_MEM("%s: release of %s requires copy/rename;" 15930Sstevel@tonic-gate " selected target board %s\n", 15940Sstevel@tonic-gate f, 15950Sstevel@tonic-gate mp->sbm_cm.sbdev_path, 15960Sstevel@tonic-gate mp->sbm_peer->sbm_cm.sbdev_path); 15970Sstevel@tonic-gate } else { 15980Sstevel@tonic-gate PR_MEM("%s: copy/rename not required to release %s\n", 15990Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 16000Sstevel@tonic-gate } 16010Sstevel@tonic-gate 16020Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RELOWNER); 16030Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RESERVED); 16040Sstevel@tonic-gate #endif 16050Sstevel@tonic-gate } 16060Sstevel@tonic-gate 16070Sstevel@tonic-gate return (err_flag ? -1 : 0); 16080Sstevel@tonic-gate } 16090Sstevel@tonic-gate 16100Sstevel@tonic-gate void 16110Sstevel@tonic-gate dr_release_mem_done(dr_common_unit_t *cp) 16120Sstevel@tonic-gate { 16130Sstevel@tonic-gate dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp; 16140Sstevel@tonic-gate dr_mem_unit_t *t_mp, *mp; 16150Sstevel@tonic-gate int rv; 16160Sstevel@tonic-gate static fn_t f = "dr_release_mem_done"; 16170Sstevel@tonic-gate 16180Sstevel@tonic-gate /* 16190Sstevel@tonic-gate * This unit will be flagged with DR_MFLAG_SOURCE, if it 16200Sstevel@tonic-gate * has a target unit. 
16210Sstevel@tonic-gate */ 16220Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 16230Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 16240Sstevel@tonic-gate ASSERT(t_mp != NULL); 16250Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 16260Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET); 16270Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_RESERVED); 16280Sstevel@tonic-gate } else { 16290Sstevel@tonic-gate /* this is no target unit */ 16300Sstevel@tonic-gate t_mp = NULL; 16310Sstevel@tonic-gate } 16320Sstevel@tonic-gate 16330Sstevel@tonic-gate /* free delete handle */ 16340Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_RELOWNER); 16350Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_RESERVED); 16360Sstevel@tonic-gate rv = kphysm_del_release(s_mp->sbm_memhandle); 16370Sstevel@tonic-gate if (rv != KPHYSM_OK) { 16380Sstevel@tonic-gate /* 16390Sstevel@tonic-gate * can do nothing but complain 16400Sstevel@tonic-gate * and hope helpful for debug 16410Sstevel@tonic-gate */ 16420Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unexpected kphysm_del_release" 16430Sstevel@tonic-gate " return value %d", f, rv); 16440Sstevel@tonic-gate } 16450Sstevel@tonic-gate s_mp->sbm_flags &= ~DR_MFLAG_RELOWNER; 16460Sstevel@tonic-gate 16470Sstevel@tonic-gate /* 16480Sstevel@tonic-gate * If an error was encountered during release, clean up 16490Sstevel@tonic-gate * the source (and target, if present) unit data. 16500Sstevel@tonic-gate */ 16510Sstevel@tonic-gate /* XXX Can we know that sbdev_error was encountered during release? 
*/ 16520Sstevel@tonic-gate if (s_mp->sbm_cm.sbdev_error != NULL) { 16530Sstevel@tonic-gate PR_MEM("%s: %s: error %d noted\n", 16540Sstevel@tonic-gate f, 16550Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 16560Sstevel@tonic-gate s_mp->sbm_cm.sbdev_error->e_code); 16570Sstevel@tonic-gate 16580Sstevel@tonic-gate if (t_mp != NULL) { 16590Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist); 16600Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 16610Sstevel@tonic-gate 16620Sstevel@tonic-gate if (t_mp->sbm_mlist != NULL) { 16630Sstevel@tonic-gate memlist_delete(t_mp->sbm_mlist); 16640Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 16650Sstevel@tonic-gate } 16660Sstevel@tonic-gate 16670Sstevel@tonic-gate t_mp->sbm_peer = NULL; 16680Sstevel@tonic-gate t_mp->sbm_flags = 0; 16690Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 16700Sstevel@tonic-gate } 16710Sstevel@tonic-gate 16720Sstevel@tonic-gate if (s_mp->sbm_del_mlist != s_mp->sbm_mlist) 16730Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 16740Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 16750Sstevel@tonic-gate 16760Sstevel@tonic-gate if (s_mp->sbm_mlist != NULL) { 16770Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 16780Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 16790Sstevel@tonic-gate } 16800Sstevel@tonic-gate 16810Sstevel@tonic-gate s_mp->sbm_peer = NULL; 16820Sstevel@tonic-gate s_mp->sbm_flags = 0; 16830Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 16840Sstevel@tonic-gate 16850Sstevel@tonic-gate /* bail out */ 16860Sstevel@tonic-gate return; 16870Sstevel@tonic-gate } 16880Sstevel@tonic-gate 16890Sstevel@tonic-gate DR_DEV_SET_RELEASED(&s_mp->sbm_cm); 16900Sstevel@tonic-gate dr_device_transition(&s_mp->sbm_cm, DR_STATE_RELEASE); 16910Sstevel@tonic-gate 16920Sstevel@tonic-gate if (t_mp != NULL) { 16930Sstevel@tonic-gate /* 16940Sstevel@tonic-gate * the kphysm delete operation that drained the source 16950Sstevel@tonic-gate * board also drained this target board. 
Since the source 16960Sstevel@tonic-gate * board drain is now known to have succeeded, we know this 16970Sstevel@tonic-gate * target board is drained too. 16980Sstevel@tonic-gate * 16990Sstevel@tonic-gate * because DR_DEV_SET_RELEASED and dr_device_transition 17000Sstevel@tonic-gate * is done here, the dr_release_dev_done should not 17010Sstevel@tonic-gate * fail. 17020Sstevel@tonic-gate */ 17030Sstevel@tonic-gate DR_DEV_SET_RELEASED(&t_mp->sbm_cm); 17040Sstevel@tonic-gate dr_device_transition(&t_mp->sbm_cm, DR_STATE_RELEASE); 17050Sstevel@tonic-gate 17060Sstevel@tonic-gate /* 17070Sstevel@tonic-gate * NOTE: do not transition target's board state, 17080Sstevel@tonic-gate * even if the mem-unit was the last configure 17090Sstevel@tonic-gate * unit of the board. When copy/rename completes 17100Sstevel@tonic-gate * this mem-unit will transitioned back to 17110Sstevel@tonic-gate * the configured state. In the meantime, the 17120Sstevel@tonic-gate * board's must remain as is. 17130Sstevel@tonic-gate */ 17140Sstevel@tonic-gate } 17150Sstevel@tonic-gate 17160Sstevel@tonic-gate /* if board(s) had deleted memory, verify it is gone */ 17170Sstevel@tonic-gate rv = 0; 17180Sstevel@tonic-gate memlist_read_lock(); 17190Sstevel@tonic-gate if (s_mp->sbm_del_mlist != NULL) { 17200Sstevel@tonic-gate mp = s_mp; 17210Sstevel@tonic-gate rv = memlist_intersect(phys_install, mp->sbm_del_mlist); 17220Sstevel@tonic-gate } 17230Sstevel@tonic-gate if (rv == 0 && t_mp && t_mp->sbm_del_mlist != NULL) { 17240Sstevel@tonic-gate mp = t_mp; 17250Sstevel@tonic-gate rv = memlist_intersect(phys_install, mp->sbm_del_mlist); 17260Sstevel@tonic-gate } 17270Sstevel@tonic-gate memlist_read_unlock(); 17280Sstevel@tonic-gate if (rv) { 17290Sstevel@tonic-gate cmn_err(CE_WARN, "%s: %smem-unit (%d.%d): " 17300Sstevel@tonic-gate "deleted memory still found in phys_install", 17310Sstevel@tonic-gate f, 17320Sstevel@tonic-gate (mp == t_mp ? 
"target " : ""), 17330Sstevel@tonic-gate mp->sbm_cm.sbdev_bp->b_num, 17340Sstevel@tonic-gate mp->sbm_cm.sbdev_unum); 17350Sstevel@tonic-gate 17360Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&s_mp->sbm_cm); 17370Sstevel@tonic-gate return; 17380Sstevel@tonic-gate } 17390Sstevel@tonic-gate 17400Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_RELDONE; 17410Sstevel@tonic-gate if (t_mp != NULL) 17420Sstevel@tonic-gate t_mp->sbm_flags |= DR_MFLAG_RELDONE; 17430Sstevel@tonic-gate 17440Sstevel@tonic-gate /* this should not fail */ 17450Sstevel@tonic-gate if (dr_release_dev_done(&s_mp->sbm_cm) != 0) { 17460Sstevel@tonic-gate /* catch this in debug kernels */ 17470Sstevel@tonic-gate ASSERT(0); 17480Sstevel@tonic-gate return; 17490Sstevel@tonic-gate } 17500Sstevel@tonic-gate 17510Sstevel@tonic-gate PR_MEM("%s: marking %s release DONE\n", 17520Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 17530Sstevel@tonic-gate 17540Sstevel@tonic-gate s_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED; 17550Sstevel@tonic-gate 17560Sstevel@tonic-gate if (t_mp != NULL) { 17570Sstevel@tonic-gate /* should not fail */ 17580Sstevel@tonic-gate rv = dr_release_dev_done(&t_mp->sbm_cm); 17590Sstevel@tonic-gate if (rv != 0) { 17600Sstevel@tonic-gate /* catch this in debug kernels */ 17610Sstevel@tonic-gate ASSERT(0); 17620Sstevel@tonic-gate return; 17630Sstevel@tonic-gate } 17640Sstevel@tonic-gate 17650Sstevel@tonic-gate PR_MEM("%s: marking %s release DONE\n", 17660Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 17670Sstevel@tonic-gate 17680Sstevel@tonic-gate t_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED; 17690Sstevel@tonic-gate } 17700Sstevel@tonic-gate } 17710Sstevel@tonic-gate 17720Sstevel@tonic-gate /*ARGSUSED*/ 17730Sstevel@tonic-gate int 17740Sstevel@tonic-gate dr_disconnect_mem(dr_mem_unit_t *mp) 17750Sstevel@tonic-gate { 17760Sstevel@tonic-gate static fn_t f = "dr_disconnect_mem"; 17770Sstevel@tonic-gate update_membounds_t umb; 17780Sstevel@tonic-gate 17790Sstevel@tonic-gate #ifdef DEBUG 
17800Sstevel@tonic-gate int state = mp->sbm_cm.sbdev_state; 17810Sstevel@tonic-gate ASSERT(state == DR_STATE_CONNECTED || 17820Sstevel@tonic-gate state == DR_STATE_UNCONFIGURED); 17830Sstevel@tonic-gate #endif 17840Sstevel@tonic-gate 17850Sstevel@tonic-gate PR_MEM("%s...\n", f); 17860Sstevel@tonic-gate 17870Sstevel@tonic-gate if (mp->sbm_del_mlist && mp->sbm_del_mlist != mp->sbm_mlist) 17880Sstevel@tonic-gate memlist_delete(mp->sbm_del_mlist); 17890Sstevel@tonic-gate mp->sbm_del_mlist = NULL; 17900Sstevel@tonic-gate 17910Sstevel@tonic-gate if (mp->sbm_mlist) { 17920Sstevel@tonic-gate memlist_delete(mp->sbm_mlist); 17930Sstevel@tonic-gate mp->sbm_mlist = NULL; 17940Sstevel@tonic-gate } 17950Sstevel@tonic-gate 17960Sstevel@tonic-gate /* 17970Sstevel@tonic-gate * Remove memory from lgroup 17980Sstevel@tonic-gate * For now, only board info is required. 17990Sstevel@tonic-gate */ 18000Sstevel@tonic-gate umb.u_board = mp->sbm_cm.sbdev_bp->b_num; 18010Sstevel@tonic-gate umb.u_base = (uint64_t)-1; 18020Sstevel@tonic-gate umb.u_len = (uint64_t)-1; 18030Sstevel@tonic-gate 18040Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_DEL, (uintptr_t)&umb); 18050Sstevel@tonic-gate 18060Sstevel@tonic-gate return (0); 18070Sstevel@tonic-gate } 18080Sstevel@tonic-gate 18090Sstevel@tonic-gate int 18100Sstevel@tonic-gate dr_cancel_mem(dr_mem_unit_t *s_mp) 18110Sstevel@tonic-gate { 18120Sstevel@tonic-gate dr_mem_unit_t *t_mp; 18130Sstevel@tonic-gate dr_state_t state; 18140Sstevel@tonic-gate static fn_t f = "dr_cancel_mem"; 18150Sstevel@tonic-gate 18160Sstevel@tonic-gate state = s_mp->sbm_cm.sbdev_state; 18170Sstevel@tonic-gate 18180Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_TARGET) { 18190Sstevel@tonic-gate /* must cancel source board, not target board */ 18200Sstevel@tonic-gate /* TODO: set error */ 18210Sstevel@tonic-gate return (-1); 18220Sstevel@tonic-gate } else if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 18230Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 18240Sstevel@tonic-gate 
ASSERT(t_mp != NULL); 18250Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 18260Sstevel@tonic-gate 18270Sstevel@tonic-gate /* must always match the source board's state */ 18280Sstevel@tonic-gate /* TODO: is this assertion correct? */ 18290Sstevel@tonic-gate ASSERT(t_mp->sbm_cm.sbdev_state == state); 18300Sstevel@tonic-gate } else { 18310Sstevel@tonic-gate /* this is no target unit */ 18320Sstevel@tonic-gate t_mp = NULL; 18330Sstevel@tonic-gate } 18340Sstevel@tonic-gate 18350Sstevel@tonic-gate switch (state) { 18360Sstevel@tonic-gate case DR_STATE_UNREFERENCED: /* state set by dr_release_dev_done */ 18370Sstevel@tonic-gate ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0); 18380Sstevel@tonic-gate 18390Sstevel@tonic-gate if (t_mp != NULL && t_mp->sbm_del_mlist != NULL) { 18400Sstevel@tonic-gate PR_MEM("%s: undoing target %s memory delete\n", 18410Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 18420Sstevel@tonic-gate dr_add_memory_spans(t_mp, t_mp->sbm_del_mlist); 18430Sstevel@tonic-gate 18440Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm); 18450Sstevel@tonic-gate } 18460Sstevel@tonic-gate 18470Sstevel@tonic-gate if (s_mp->sbm_del_mlist != NULL) { 18480Sstevel@tonic-gate PR_MEM("%s: undoing %s memory delete\n", 18490Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 18500Sstevel@tonic-gate 18510Sstevel@tonic-gate dr_add_memory_spans(s_mp, s_mp->sbm_del_mlist); 18520Sstevel@tonic-gate } 18530Sstevel@tonic-gate 18540Sstevel@tonic-gate /*FALLTHROUGH*/ 18550Sstevel@tonic-gate 18560Sstevel@tonic-gate /* TODO: should no longer be possible to see the release state here */ 18570Sstevel@tonic-gate case DR_STATE_RELEASE: /* state set by dr_release_mem_done */ 18580Sstevel@tonic-gate 18590Sstevel@tonic-gate ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0); 18600Sstevel@tonic-gate 18610Sstevel@tonic-gate if (t_mp != NULL) { 18620Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist); 18630Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 
18640Sstevel@tonic-gate 18650Sstevel@tonic-gate if (t_mp->sbm_mlist != NULL) { 18660Sstevel@tonic-gate memlist_delete(t_mp->sbm_mlist); 18670Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 18680Sstevel@tonic-gate } 18690Sstevel@tonic-gate 18700Sstevel@tonic-gate t_mp->sbm_peer = NULL; 18710Sstevel@tonic-gate t_mp->sbm_flags = 0; 18720Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 18730Sstevel@tonic-gate dr_init_mem_unit_data(t_mp); 18740Sstevel@tonic-gate 18750Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&t_mp->sbm_cm); 18760Sstevel@tonic-gate 18770Sstevel@tonic-gate dr_device_transition( 18780Sstevel@tonic-gate &t_mp->sbm_cm, DR_STATE_CONFIGURED); 18790Sstevel@tonic-gate } 18800Sstevel@tonic-gate 18810Sstevel@tonic-gate if (s_mp->sbm_del_mlist != s_mp->sbm_mlist) 18820Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 18830Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 18840Sstevel@tonic-gate 18850Sstevel@tonic-gate if (s_mp->sbm_mlist != NULL) { 18860Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 18870Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 18880Sstevel@tonic-gate } 18890Sstevel@tonic-gate 18900Sstevel@tonic-gate s_mp->sbm_peer = NULL; 18910Sstevel@tonic-gate s_mp->sbm_flags = 0; 18920Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 18930Sstevel@tonic-gate dr_init_mem_unit_data(s_mp); 18940Sstevel@tonic-gate 18950Sstevel@tonic-gate return (0); 18960Sstevel@tonic-gate 18970Sstevel@tonic-gate default: 18980Sstevel@tonic-gate PR_MEM("%s: WARNING unexpected state (%d) for %s\n", 18990Sstevel@tonic-gate f, (int)state, s_mp->sbm_cm.sbdev_path); 19000Sstevel@tonic-gate 19010Sstevel@tonic-gate return (-1); 19020Sstevel@tonic-gate } 19030Sstevel@tonic-gate /*NOTREACHED*/ 19040Sstevel@tonic-gate } 19050Sstevel@tonic-gate 19060Sstevel@tonic-gate void 19070Sstevel@tonic-gate dr_init_mem_unit(dr_mem_unit_t *mp) 19080Sstevel@tonic-gate { 19090Sstevel@tonic-gate dr_state_t new_state; 19100Sstevel@tonic-gate 19110Sstevel@tonic-gate 19120Sstevel@tonic-gate if 
(DR_DEV_IS_ATTACHED(&mp->sbm_cm)) { 19130Sstevel@tonic-gate new_state = DR_STATE_CONFIGURED; 19140Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 19150Sstevel@tonic-gate } else if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) { 19160Sstevel@tonic-gate new_state = DR_STATE_CONNECTED; 19170Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 19180Sstevel@tonic-gate } else if (mp->sbm_cm.sbdev_id != (drmachid_t)0) { 19190Sstevel@tonic-gate new_state = DR_STATE_OCCUPIED; 19200Sstevel@tonic-gate } else { 19210Sstevel@tonic-gate new_state = DR_STATE_EMPTY; 19220Sstevel@tonic-gate } 19230Sstevel@tonic-gate 19240Sstevel@tonic-gate if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) 19250Sstevel@tonic-gate dr_init_mem_unit_data(mp); 19260Sstevel@tonic-gate 19270Sstevel@tonic-gate /* delay transition until fully initialized */ 19280Sstevel@tonic-gate dr_device_transition(&mp->sbm_cm, new_state); 19290Sstevel@tonic-gate } 19300Sstevel@tonic-gate 19310Sstevel@tonic-gate static void 19320Sstevel@tonic-gate dr_init_mem_unit_data(dr_mem_unit_t *mp) 19330Sstevel@tonic-gate { 19340Sstevel@tonic-gate drmachid_t id = mp->sbm_cm.sbdev_id; 19350Sstevel@tonic-gate uint64_t bytes; 19360Sstevel@tonic-gate sbd_error_t *err; 19370Sstevel@tonic-gate static fn_t f = "dr_init_mem_unit_data"; 19380Sstevel@tonic-gate update_membounds_t umb; 19390Sstevel@tonic-gate 19400Sstevel@tonic-gate PR_MEM("%s...\n", f); 19410Sstevel@tonic-gate 19420Sstevel@tonic-gate /* a little sanity checking */ 19430Sstevel@tonic-gate ASSERT(mp->sbm_peer == NULL); 19440Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 19450Sstevel@tonic-gate 19460Sstevel@tonic-gate /* get basepfn of mem unit */ 19470Sstevel@tonic-gate err = drmach_mem_get_base_physaddr(id, &bytes); 19480Sstevel@tonic-gate if (err) { 19490Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19500Sstevel@tonic-gate mp->sbm_basepfn = (pfn_t)-1; 19510Sstevel@tonic-gate } else 19520Sstevel@tonic-gate mp->sbm_basepfn = _b64top(bytes); 19530Sstevel@tonic-gate 
19540Sstevel@tonic-gate /* attempt to get number of pages from PDA */ 19550Sstevel@tonic-gate err = drmach_mem_get_size(id, &bytes); 19560Sstevel@tonic-gate if (err) { 19570Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19580Sstevel@tonic-gate mp->sbm_npages = 0; 19590Sstevel@tonic-gate } else 19600Sstevel@tonic-gate mp->sbm_npages = _b64top(bytes); 19610Sstevel@tonic-gate 19620Sstevel@tonic-gate /* if didn't work, calculate using memlist */ 19630Sstevel@tonic-gate if (mp->sbm_npages == 0) { 19640Sstevel@tonic-gate struct memlist *ml, *mlist; 19650Sstevel@tonic-gate /* 19660Sstevel@tonic-gate * Either we couldn't open the PDA or our 19670Sstevel@tonic-gate * PDA has garbage in it. We must have the 19680Sstevel@tonic-gate * page count consistent and whatever the 19690Sstevel@tonic-gate * OS states has precedence over the PDA 19700Sstevel@tonic-gate * so let's check the kernel. 19710Sstevel@tonic-gate */ 19720Sstevel@tonic-gate /* TODO: curious comment. it suggests pda query should happen if this fails */ 19730Sstevel@tonic-gate PR_MEM("%s: PDA query failed for npages." 
19740Sstevel@tonic-gate " Checking memlist for %s\n", 19750Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 19760Sstevel@tonic-gate 19770Sstevel@tonic-gate mlist = dr_get_memlist(mp); 19780Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) 19790Sstevel@tonic-gate mp->sbm_npages += btop(ml->size); 19800Sstevel@tonic-gate memlist_delete(mlist); 19810Sstevel@tonic-gate } 19820Sstevel@tonic-gate 19830Sstevel@tonic-gate err = drmach_mem_get_alignment(id, &bytes); 19840Sstevel@tonic-gate if (err) { 19850Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19860Sstevel@tonic-gate mp->sbm_alignment_mask = 0; 19870Sstevel@tonic-gate } else 19880Sstevel@tonic-gate mp->sbm_alignment_mask = _b64top(bytes); 19890Sstevel@tonic-gate 19900Sstevel@tonic-gate err = drmach_mem_get_slice_size(id, &bytes); 19910Sstevel@tonic-gate if (err) { 19920Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19930Sstevel@tonic-gate mp->sbm_slice_size = 0; /* paranoia */ 19940Sstevel@tonic-gate } else 19950Sstevel@tonic-gate mp->sbm_slice_size = bytes; 19960Sstevel@tonic-gate 19970Sstevel@tonic-gate /* 19980Sstevel@tonic-gate * Add memory to lgroup 19990Sstevel@tonic-gate */ 20000Sstevel@tonic-gate umb.u_board = mp->sbm_cm.sbdev_bp->b_num; 20010Sstevel@tonic-gate umb.u_base = (uint64_t)mp->sbm_basepfn << MMU_PAGESHIFT; 20020Sstevel@tonic-gate umb.u_len = (uint64_t)mp->sbm_npages << MMU_PAGESHIFT; 20030Sstevel@tonic-gate 20040Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb); 20050Sstevel@tonic-gate 2006930Smathue PR_MEM("%s: %s (basepfn = 0x%lx, npgs = %ld)\n", 20070Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages); 20080Sstevel@tonic-gate } 20090Sstevel@tonic-gate 20100Sstevel@tonic-gate static int 20110Sstevel@tonic-gate dr_reserve_mem_spans(memhandle_t *mhp, struct memlist *ml) 20120Sstevel@tonic-gate { 20130Sstevel@tonic-gate int err; 20140Sstevel@tonic-gate pfn_t base; 20150Sstevel@tonic-gate pgcnt_t npgs; 
20160Sstevel@tonic-gate struct memlist *mc; 20170Sstevel@tonic-gate static fn_t f = "dr_reserve_mem_spans"; 20180Sstevel@tonic-gate 20190Sstevel@tonic-gate PR_MEM("%s...\n", f); 20200Sstevel@tonic-gate 20210Sstevel@tonic-gate /* 20220Sstevel@tonic-gate * Walk the supplied memlist scheduling each span for removal 20230Sstevel@tonic-gate * with kphysm_del_span. It is possible that a span may intersect 20240Sstevel@tonic-gate * an area occupied by the cage. 20250Sstevel@tonic-gate */ 20260Sstevel@tonic-gate for (mc = ml; mc != NULL; mc = mc->next) { 20270Sstevel@tonic-gate base = _b64top(mc->address); 20280Sstevel@tonic-gate npgs = _b64top(mc->size); 20290Sstevel@tonic-gate 20300Sstevel@tonic-gate err = kphysm_del_span(*mhp, base, npgs); 20310Sstevel@tonic-gate if (err != KPHYSM_OK) { 20320Sstevel@tonic-gate cmn_err(CE_WARN, "%s memory reserve failed." 20330Sstevel@tonic-gate " unexpected kphysm_del_span return value %d;" 20340Sstevel@tonic-gate " basepfn=0x%lx npages=%ld", 20350Sstevel@tonic-gate f, err, base, npgs); 20360Sstevel@tonic-gate 20370Sstevel@tonic-gate return (-1); 20380Sstevel@tonic-gate } 20390Sstevel@tonic-gate } 20400Sstevel@tonic-gate 20410Sstevel@tonic-gate return (0); 20420Sstevel@tonic-gate } 20430Sstevel@tonic-gate 20440Sstevel@tonic-gate /* debug counters */ 20450Sstevel@tonic-gate int dr_smt_realigned; 20460Sstevel@tonic-gate int dr_smt_preference[4]; 20470Sstevel@tonic-gate 20480Sstevel@tonic-gate #ifdef DEBUG 20490Sstevel@tonic-gate uint_t dr_ignore_board; /* if bit[bnum-1] set, board won't be candidate */ 20500Sstevel@tonic-gate #endif 20510Sstevel@tonic-gate 20520Sstevel@tonic-gate /* 20530Sstevel@tonic-gate * Find and reserve a copy/rename target board suitable for the 20540Sstevel@tonic-gate * given source board. 20550Sstevel@tonic-gate * All boards in the system are examined and categorized in relation to 20560Sstevel@tonic-gate * their memory size versus the source board's memory size. 
Order of 20570Sstevel@tonic-gate * preference is: 20580Sstevel@tonic-gate * 1st: board has same memory size 20590Sstevel@tonic-gate * 2nd: board has larger memory size 20600Sstevel@tonic-gate * 3rd: board has smaller memory size 20610Sstevel@tonic-gate * 4th: board has smaller memory size, available memory will be reduced. 20620Sstevel@tonic-gate * Boards in category 3 and 4 will have their MC's reprogrammed to locate the 20630Sstevel@tonic-gate * span to which the MC responds to address span that appropriately covers 20640Sstevel@tonic-gate * the nonrelocatable span of the source board. 20650Sstevel@tonic-gate */ 20660Sstevel@tonic-gate static int 20670Sstevel@tonic-gate dr_select_mem_target(dr_handle_t *hp, 20680Sstevel@tonic-gate dr_mem_unit_t *s_mp, struct memlist *s_ml) 20690Sstevel@tonic-gate { 20700Sstevel@tonic-gate pgcnt_t sz = _b64top(s_mp->sbm_slice_size); 20710Sstevel@tonic-gate pgcnt_t sm = sz - 1; /* mem_slice_mask */ 20720Sstevel@tonic-gate pfn_t s_phi, t_phi; 20730Sstevel@tonic-gate 20740Sstevel@tonic-gate int n_sets = 4; /* same, larger, smaller, clipped */ 20750Sstevel@tonic-gate int preference; /* lower value is higher preference */ 20760Sstevel@tonic-gate int n_units_per_set; 20770Sstevel@tonic-gate int idx; 20780Sstevel@tonic-gate dr_mem_unit_t **sets; 20790Sstevel@tonic-gate 20800Sstevel@tonic-gate int t_bd; 20810Sstevel@tonic-gate int t_unit; 20820Sstevel@tonic-gate int rv; 20830Sstevel@tonic-gate int allow_src_memrange_modify; 20840Sstevel@tonic-gate int allow_targ_memrange_modify; 20850Sstevel@tonic-gate drmachid_t t_id; 20860Sstevel@tonic-gate dr_board_t *s_bp, *t_bp; 20870Sstevel@tonic-gate dr_mem_unit_t *t_mp, *c_mp; 20880Sstevel@tonic-gate struct memlist *d_ml, *t_ml, *x_ml; 20890Sstevel@tonic-gate memquery_t s_mq = {0}; 20900Sstevel@tonic-gate static fn_t f = "dr_select_mem_target"; 20910Sstevel@tonic-gate 20920Sstevel@tonic-gate PR_MEM("%s...\n", f); 20930Sstevel@tonic-gate 20940Sstevel@tonic-gate ASSERT(s_ml != NULL); 
20950Sstevel@tonic-gate 20960Sstevel@tonic-gate n_units_per_set = MAX_BOARDS * MAX_MEM_UNITS_PER_BOARD; 20970Sstevel@tonic-gate sets = GETSTRUCT(dr_mem_unit_t *, n_units_per_set * n_sets); 20980Sstevel@tonic-gate 20990Sstevel@tonic-gate s_bp = hp->h_bd; 21000Sstevel@tonic-gate /* calculate the offset into the slice of the last source board pfn */ 21010Sstevel@tonic-gate ASSERT(s_mp->sbm_npages != 0); 21020Sstevel@tonic-gate s_phi = (s_mp->sbm_basepfn + s_mp->sbm_npages - 1) & sm; 21030Sstevel@tonic-gate 21040Sstevel@tonic-gate allow_src_memrange_modify = drmach_allow_memrange_modify(s_bp->b_id); 21050Sstevel@tonic-gate 21060Sstevel@tonic-gate /* 21070Sstevel@tonic-gate * Make one pass through all memory units on all boards 21080Sstevel@tonic-gate * and categorize them with respect to the source board. 21090Sstevel@tonic-gate */ 21100Sstevel@tonic-gate for (t_bd = 0; t_bd < MAX_BOARDS; t_bd++) { 21110Sstevel@tonic-gate /* 21120Sstevel@tonic-gate * The board structs are a contiguous array 21130Sstevel@tonic-gate * so we take advantage of that to find the 21140Sstevel@tonic-gate * correct board struct pointer for a given 21150Sstevel@tonic-gate * board number. 
21160Sstevel@tonic-gate */ 21170Sstevel@tonic-gate t_bp = dr_lookup_board(t_bd); 21180Sstevel@tonic-gate 21190Sstevel@tonic-gate /* source board can not be its own target */ 21200Sstevel@tonic-gate if (s_bp->b_num == t_bp->b_num) 21210Sstevel@tonic-gate continue; 21220Sstevel@tonic-gate 21230Sstevel@tonic-gate for (t_unit = 0; t_unit < MAX_MEM_UNITS_PER_BOARD; t_unit++) { 21240Sstevel@tonic-gate 21250Sstevel@tonic-gate t_mp = dr_get_mem_unit(t_bp, t_unit); 21260Sstevel@tonic-gate 21270Sstevel@tonic-gate /* this memory node must be attached */ 21280Sstevel@tonic-gate if (!DR_DEV_IS_ATTACHED(&t_mp->sbm_cm)) 21290Sstevel@tonic-gate continue; 21300Sstevel@tonic-gate 21310Sstevel@tonic-gate /* source unit can not be its own target */ 21320Sstevel@tonic-gate if (s_mp == t_mp) { 21330Sstevel@tonic-gate /* catch this is debug kernels */ 21340Sstevel@tonic-gate ASSERT(0); 21350Sstevel@tonic-gate continue; 21360Sstevel@tonic-gate } 21370Sstevel@tonic-gate 21380Sstevel@tonic-gate /* 21390Sstevel@tonic-gate * this memory node must not already be reserved 21400Sstevel@tonic-gate * by some other memory delete operation. 21410Sstevel@tonic-gate */ 21420Sstevel@tonic-gate if (t_mp->sbm_flags & DR_MFLAG_RESERVED) 21430Sstevel@tonic-gate continue; 21440Sstevel@tonic-gate 21450Sstevel@tonic-gate /* 21460Sstevel@tonic-gate * categorize the memory node 21470Sstevel@tonic-gate * If this is a smaller memory node, create a 21480Sstevel@tonic-gate * temporary, edited copy of the source board's 21490Sstevel@tonic-gate * memlist containing only the span of the non- 21500Sstevel@tonic-gate * relocatable pages. 
21510Sstevel@tonic-gate */ 21520Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 21530Sstevel@tonic-gate t_id = t_mp->sbm_cm.sbdev_bp->b_id; 21540Sstevel@tonic-gate allow_targ_memrange_modify = 21550Sstevel@tonic-gate drmach_allow_memrange_modify(t_id); 21560Sstevel@tonic-gate if (t_mp->sbm_npages == s_mp->sbm_npages && 21570Sstevel@tonic-gate t_phi == s_phi) { 21580Sstevel@tonic-gate preference = 0; 21590Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 21600Sstevel@tonic-gate } else if (t_mp->sbm_npages > s_mp->sbm_npages && 21610Sstevel@tonic-gate t_phi > s_phi) { 21620Sstevel@tonic-gate /* 21630Sstevel@tonic-gate * Selecting this target will require modifying 21640Sstevel@tonic-gate * the source and/or target physical address 21650Sstevel@tonic-gate * ranges. Skip if not supported by platform. 21660Sstevel@tonic-gate */ 21670Sstevel@tonic-gate if (!allow_src_memrange_modify || 21680Sstevel@tonic-gate !allow_targ_memrange_modify) { 21690Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 21700Sstevel@tonic-gate "range relocation not supported " 21710Sstevel@tonic-gate "by platform\n", f, 21720Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 21730Sstevel@tonic-gate continue; 21740Sstevel@tonic-gate } 21750Sstevel@tonic-gate preference = 1; 21760Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 21770Sstevel@tonic-gate } else { 21780Sstevel@tonic-gate pfn_t pfn = 0; 21790Sstevel@tonic-gate 21800Sstevel@tonic-gate /* 21810Sstevel@tonic-gate * Selecting this target will require modifying 21820Sstevel@tonic-gate * the source and/or target physical address 21830Sstevel@tonic-gate * ranges. Skip if not supported by platform. 
21840Sstevel@tonic-gate */ 21850Sstevel@tonic-gate if (!allow_src_memrange_modify || 21860Sstevel@tonic-gate !allow_targ_memrange_modify) { 21870Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 21880Sstevel@tonic-gate "range relocation not supported " 21890Sstevel@tonic-gate "by platform\n", f, 21900Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 21910Sstevel@tonic-gate continue; 21920Sstevel@tonic-gate } 21930Sstevel@tonic-gate 21940Sstevel@tonic-gate /* 21950Sstevel@tonic-gate * Check if its mc can be programmed to relocate 21960Sstevel@tonic-gate * the active address range to match the 21970Sstevel@tonic-gate * nonrelocatable span of the source board. 21980Sstevel@tonic-gate */ 21990Sstevel@tonic-gate preference = 2; 22000Sstevel@tonic-gate 22010Sstevel@tonic-gate if (s_mq.phys_pages == 0) { 22020Sstevel@tonic-gate /* 22030Sstevel@tonic-gate * find non-relocatable span on 22040Sstevel@tonic-gate * source board. 22050Sstevel@tonic-gate */ 22060Sstevel@tonic-gate rv = kphysm_del_span_query( 22070Sstevel@tonic-gate s_mp->sbm_basepfn, 22080Sstevel@tonic-gate s_mp->sbm_npages, &s_mq); 22090Sstevel@tonic-gate if (rv != KPHYSM_OK) { 22100Sstevel@tonic-gate PR_MEM("%s: %s: unexpected" 22110Sstevel@tonic-gate " kphysm_del_span_query" 22120Sstevel@tonic-gate " return value %d;" 22130Sstevel@tonic-gate " basepfn 0x%lx, npages %ld\n", 22140Sstevel@tonic-gate f, 22150Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 22160Sstevel@tonic-gate rv, 22170Sstevel@tonic-gate s_mp->sbm_basepfn, 22180Sstevel@tonic-gate s_mp->sbm_npages); 22190Sstevel@tonic-gate 22200Sstevel@tonic-gate /* paranoia */ 22210Sstevel@tonic-gate s_mq.phys_pages = 0; 22220Sstevel@tonic-gate 22230Sstevel@tonic-gate continue; 22240Sstevel@tonic-gate } 22250Sstevel@tonic-gate 22260Sstevel@tonic-gate /* more paranoia */ 22270Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 22280Sstevel@tonic-gate ASSERT(s_mq.nonrelocatable != 0); 22290Sstevel@tonic-gate 22300Sstevel@tonic-gate /* 22310Sstevel@tonic-gate * this 
should not happen 22320Sstevel@tonic-gate * if it does, it simply means that 22330Sstevel@tonic-gate * we can not proceed with qualifying 22340Sstevel@tonic-gate * this target candidate. 22350Sstevel@tonic-gate */ 22360Sstevel@tonic-gate if (s_mq.nonrelocatable == 0) 22370Sstevel@tonic-gate continue; 22380Sstevel@tonic-gate 22390Sstevel@tonic-gate PR_MEM("%s: %s: nonrelocatable" 22400Sstevel@tonic-gate " span (0x%lx..0x%lx)\n", 22410Sstevel@tonic-gate f, 22420Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 22430Sstevel@tonic-gate s_mq.first_nonrelocatable, 22440Sstevel@tonic-gate s_mq.last_nonrelocatable); 22450Sstevel@tonic-gate } 22460Sstevel@tonic-gate 22470Sstevel@tonic-gate /* 22480Sstevel@tonic-gate * Round down the starting pfn of the 22490Sstevel@tonic-gate * nonrelocatable span on the source board 22500Sstevel@tonic-gate * to nearest programmable boundary possible 22510Sstevel@tonic-gate * with this target candidate. 22520Sstevel@tonic-gate */ 22530Sstevel@tonic-gate pfn = s_mq.first_nonrelocatable & 22540Sstevel@tonic-gate ~t_mp->sbm_alignment_mask; 22550Sstevel@tonic-gate 22560Sstevel@tonic-gate /* skip candidate if memory is too small */ 22570Sstevel@tonic-gate if (pfn + t_mp->sbm_npages < 22580Sstevel@tonic-gate s_mq.last_nonrelocatable) 22590Sstevel@tonic-gate continue; 22600Sstevel@tonic-gate 22610Sstevel@tonic-gate /* 22620Sstevel@tonic-gate * reprogramming an mc to relocate its 22630Sstevel@tonic-gate * active address range means the beginning 22640Sstevel@tonic-gate * address to which the DIMMS respond will 22650Sstevel@tonic-gate * be somewhere above the slice boundary 22660Sstevel@tonic-gate * address. The larger the size of memory 22670Sstevel@tonic-gate * on this unit, the more likely part of it 22680Sstevel@tonic-gate * will exist beyond the end of the slice. 
22690Sstevel@tonic-gate * The portion of the memory that does is 22700Sstevel@tonic-gate * unavailable to the system until the mc 22710Sstevel@tonic-gate * reprogrammed to a more favorable base 22720Sstevel@tonic-gate * address. 22730Sstevel@tonic-gate * An attempt is made to avoid the loss by 22740Sstevel@tonic-gate * recalculating the mc base address relative 22750Sstevel@tonic-gate * to the end of the slice. This may produce 22760Sstevel@tonic-gate * a more favorable result. If not, we lower 22770Sstevel@tonic-gate * the board's preference rating so that it 22780Sstevel@tonic-gate * is one the last candidate boards to be 22790Sstevel@tonic-gate * considered. 22800Sstevel@tonic-gate */ 22810Sstevel@tonic-gate if ((pfn + t_mp->sbm_npages) & ~sm) { 22820Sstevel@tonic-gate pfn_t p; 22830Sstevel@tonic-gate 22840Sstevel@tonic-gate ASSERT(sz >= t_mp->sbm_npages); 22850Sstevel@tonic-gate 22860Sstevel@tonic-gate /* 22870Sstevel@tonic-gate * calculate an alternative starting 22880Sstevel@tonic-gate * address relative to the end of the 22890Sstevel@tonic-gate * slice's address space. 22900Sstevel@tonic-gate */ 22910Sstevel@tonic-gate p = pfn & ~sm; 22920Sstevel@tonic-gate p = p + (sz - t_mp->sbm_npages); 22930Sstevel@tonic-gate p = p & ~t_mp->sbm_alignment_mask; 22940Sstevel@tonic-gate 22950Sstevel@tonic-gate if ((p > s_mq.first_nonrelocatable) || 22960Sstevel@tonic-gate (p + t_mp->sbm_npages < 22970Sstevel@tonic-gate s_mq.last_nonrelocatable)) { 22980Sstevel@tonic-gate 22990Sstevel@tonic-gate /* 23000Sstevel@tonic-gate * alternative starting addr 23010Sstevel@tonic-gate * won't work. Lower preference 23020Sstevel@tonic-gate * rating of this board, since 23030Sstevel@tonic-gate * some number of pages will 23040Sstevel@tonic-gate * unavailable for use. 
23050Sstevel@tonic-gate */ 23060Sstevel@tonic-gate preference = 3; 23070Sstevel@tonic-gate } else { 23080Sstevel@tonic-gate dr_smt_realigned++; 23090Sstevel@tonic-gate pfn = p; 23100Sstevel@tonic-gate } 23110Sstevel@tonic-gate } 23120Sstevel@tonic-gate 23130Sstevel@tonic-gate /* 23140Sstevel@tonic-gate * translate calculated pfn to an offset 23150Sstevel@tonic-gate * relative to the slice boundary. If the 23160Sstevel@tonic-gate * candidate board is selected, this offset 23170Sstevel@tonic-gate * will be used to calculate the values 23180Sstevel@tonic-gate * programmed into the mc. 23190Sstevel@tonic-gate */ 23200Sstevel@tonic-gate t_mp->sbm_slice_offset = pfn & sm; 23210Sstevel@tonic-gate PR_MEM("%s: %s:" 23220Sstevel@tonic-gate " proposed mc offset 0x%lx\n", 23230Sstevel@tonic-gate f, 23240Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 23250Sstevel@tonic-gate t_mp->sbm_slice_offset); 23260Sstevel@tonic-gate } 23270Sstevel@tonic-gate 23280Sstevel@tonic-gate dr_smt_preference[preference]++; 23290Sstevel@tonic-gate 23300Sstevel@tonic-gate /* calculate index to start of preference set */ 23310Sstevel@tonic-gate idx = n_units_per_set * preference; 23320Sstevel@tonic-gate /* calculate offset to respective element */ 23330Sstevel@tonic-gate idx += t_bd * MAX_MEM_UNITS_PER_BOARD + t_unit; 23340Sstevel@tonic-gate 23350Sstevel@tonic-gate ASSERT(idx < n_units_per_set * n_sets); 23360Sstevel@tonic-gate sets[idx] = t_mp; 23370Sstevel@tonic-gate } 23380Sstevel@tonic-gate } 23390Sstevel@tonic-gate 23400Sstevel@tonic-gate /* 23410Sstevel@tonic-gate * NOTE: this would be a good place to sort each candidate 23420Sstevel@tonic-gate * set in to some desired order, e.g. memory size in ascending 23430Sstevel@tonic-gate * order. Without an additional sorting step here, the order 23440Sstevel@tonic-gate * within a set is ascending board number order. 
23450Sstevel@tonic-gate */ 23460Sstevel@tonic-gate 23470Sstevel@tonic-gate c_mp = NULL; 23480Sstevel@tonic-gate x_ml = NULL; 23490Sstevel@tonic-gate t_ml = NULL; 23500Sstevel@tonic-gate for (idx = 0; idx < n_units_per_set * n_sets; idx++) { 23510Sstevel@tonic-gate memquery_t mq; 23520Sstevel@tonic-gate 23530Sstevel@tonic-gate /* cleanup t_ml after previous pass */ 23540Sstevel@tonic-gate if (t_ml != NULL) { 23550Sstevel@tonic-gate memlist_delete(t_ml); 23560Sstevel@tonic-gate t_ml = NULL; 23570Sstevel@tonic-gate } 23580Sstevel@tonic-gate 23590Sstevel@tonic-gate /* get candidate target board mem unit */ 23600Sstevel@tonic-gate t_mp = sets[idx]; 23610Sstevel@tonic-gate if (t_mp == NULL) 23620Sstevel@tonic-gate continue; 23630Sstevel@tonic-gate 23640Sstevel@tonic-gate /* get target board memlist */ 23650Sstevel@tonic-gate t_ml = dr_get_memlist(t_mp); 23660Sstevel@tonic-gate if (t_ml == NULL) { 23670Sstevel@tonic-gate cmn_err(CE_WARN, "%s: no memlist for" 23680Sstevel@tonic-gate " mem-unit %d, board %d", 23690Sstevel@tonic-gate f, 23700Sstevel@tonic-gate t_mp->sbm_cm.sbdev_bp->b_num, 23710Sstevel@tonic-gate t_mp->sbm_cm.sbdev_unum); 23720Sstevel@tonic-gate 23730Sstevel@tonic-gate continue; 23740Sstevel@tonic-gate } 23750Sstevel@tonic-gate 23760Sstevel@tonic-gate /* get appropriate source board memlist */ 23770Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 23780Sstevel@tonic-gate if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) { 23790Sstevel@tonic-gate spgcnt_t excess; 23800Sstevel@tonic-gate 23810Sstevel@tonic-gate /* 23820Sstevel@tonic-gate * make a copy of the source board memlist 23830Sstevel@tonic-gate * then edit it to remove the spans that 23840Sstevel@tonic-gate * are outside the calculated span of 23850Sstevel@tonic-gate * [pfn..s_mq.last_nonrelocatable]. 
23860Sstevel@tonic-gate */ 23870Sstevel@tonic-gate if (x_ml != NULL) 23880Sstevel@tonic-gate memlist_delete(x_ml); 23890Sstevel@tonic-gate 23900Sstevel@tonic-gate x_ml = memlist_dup(s_ml); 23910Sstevel@tonic-gate if (x_ml == NULL) { 23920Sstevel@tonic-gate PR_MEM("%s: memlist_dup failed\n", f); 23930Sstevel@tonic-gate /* TODO: should abort */ 23940Sstevel@tonic-gate continue; 23950Sstevel@tonic-gate } 23960Sstevel@tonic-gate 23970Sstevel@tonic-gate /* trim off lower portion */ 23980Sstevel@tonic-gate excess = t_mp->sbm_slice_offset - 23990Sstevel@tonic-gate (s_mp->sbm_basepfn & sm); 24000Sstevel@tonic-gate 24010Sstevel@tonic-gate if (excess > 0) { 24020Sstevel@tonic-gate x_ml = memlist_del_span( 24030Sstevel@tonic-gate x_ml, 24040Sstevel@tonic-gate _ptob64(s_mp->sbm_basepfn), 24050Sstevel@tonic-gate _ptob64(excess)); 24060Sstevel@tonic-gate } 24070Sstevel@tonic-gate ASSERT(x_ml); 24080Sstevel@tonic-gate 24090Sstevel@tonic-gate /* 24100Sstevel@tonic-gate * Since this candidate target board is smaller 24110Sstevel@tonic-gate * than the source board, s_mq must have been 24120Sstevel@tonic-gate * initialized in previous loop while processing 24130Sstevel@tonic-gate * this or some other candidate board. 24140Sstevel@tonic-gate * FIXME: this is weak. 
24150Sstevel@tonic-gate */ 24160Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 24170Sstevel@tonic-gate 24180Sstevel@tonic-gate /* trim off upper portion */ 24190Sstevel@tonic-gate excess = (s_mp->sbm_basepfn + s_mp->sbm_npages) 24200Sstevel@tonic-gate - (s_mq.last_nonrelocatable + 1); 24210Sstevel@tonic-gate if (excess > 0) { 24220Sstevel@tonic-gate pfn_t p; 24230Sstevel@tonic-gate 24240Sstevel@tonic-gate p = s_mq.last_nonrelocatable + 1; 24250Sstevel@tonic-gate x_ml = memlist_del_span( 24260Sstevel@tonic-gate x_ml, 24270Sstevel@tonic-gate _ptob64(p), 24280Sstevel@tonic-gate _ptob64(excess)); 24290Sstevel@tonic-gate } 24300Sstevel@tonic-gate 24310Sstevel@tonic-gate PR_MEM("%s: %s: edited source memlist:\n", 24320Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 24330Sstevel@tonic-gate PR_MEMLIST_DUMP(x_ml); 24340Sstevel@tonic-gate 24350Sstevel@tonic-gate #ifdef DEBUG 24360Sstevel@tonic-gate /* sanity check memlist */ 24370Sstevel@tonic-gate d_ml = x_ml; 24380Sstevel@tonic-gate while (d_ml->next != NULL) 24390Sstevel@tonic-gate d_ml = d_ml->next; 24400Sstevel@tonic-gate 24410Sstevel@tonic-gate ASSERT(d_ml->address + d_ml->size == 24420Sstevel@tonic-gate _ptob64(s_mq.last_nonrelocatable + 1)); 24430Sstevel@tonic-gate #endif 24440Sstevel@tonic-gate 24450Sstevel@tonic-gate /* 24460Sstevel@tonic-gate * x_ml now describes only the portion of the 24470Sstevel@tonic-gate * source board that will be moved during the 24480Sstevel@tonic-gate * copy/rename operation. 24490Sstevel@tonic-gate */ 24500Sstevel@tonic-gate d_ml = x_ml; 24510Sstevel@tonic-gate } else { 24520Sstevel@tonic-gate /* use original memlist; all spans will be moved */ 24530Sstevel@tonic-gate d_ml = s_ml; 24540Sstevel@tonic-gate } 24550Sstevel@tonic-gate 24560Sstevel@tonic-gate /* verify target can support source memory spans. 
*/ 24570Sstevel@tonic-gate if (memlist_canfit(d_ml, t_ml) == 0) { 24580Sstevel@tonic-gate PR_MEM("%s: source memlist won't" 24590Sstevel@tonic-gate " fit in target memlist\n", f); 24600Sstevel@tonic-gate PR_MEM("%s: source memlist:\n", f); 24610Sstevel@tonic-gate PR_MEMLIST_DUMP(d_ml); 24620Sstevel@tonic-gate PR_MEM("%s: target memlist:\n", f); 24630Sstevel@tonic-gate PR_MEMLIST_DUMP(t_ml); 24640Sstevel@tonic-gate 24650Sstevel@tonic-gate continue; 24660Sstevel@tonic-gate } 24670Sstevel@tonic-gate 24680Sstevel@tonic-gate /* NOTE: the value of d_ml is not used beyond this point */ 24690Sstevel@tonic-gate 24700Sstevel@tonic-gate PR_MEM("%s: checking for no-reloc in %s, " 24710Sstevel@tonic-gate " basepfn=0x%lx, npages=%ld\n", 24720Sstevel@tonic-gate f, 24730Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 24740Sstevel@tonic-gate t_mp->sbm_basepfn, 24750Sstevel@tonic-gate t_mp->sbm_npages); 24760Sstevel@tonic-gate 24770Sstevel@tonic-gate rv = kphysm_del_span_query( 24780Sstevel@tonic-gate t_mp->sbm_basepfn, t_mp->sbm_npages, &mq); 24790Sstevel@tonic-gate if (rv != KPHYSM_OK) { 24800Sstevel@tonic-gate PR_MEM("%s: kphysm_del_span_query:" 24810Sstevel@tonic-gate " unexpected return value %d\n", f, rv); 24820Sstevel@tonic-gate 24830Sstevel@tonic-gate continue; 24840Sstevel@tonic-gate } 24850Sstevel@tonic-gate 24860Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 24870Sstevel@tonic-gate PR_MEM("%s: candidate %s has" 24880Sstevel@tonic-gate " nonrelocatable span [0x%lx..0x%lx]\n", 24890Sstevel@tonic-gate f, 24900Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 24910Sstevel@tonic-gate mq.first_nonrelocatable, 24920Sstevel@tonic-gate mq.last_nonrelocatable); 24930Sstevel@tonic-gate 24940Sstevel@tonic-gate continue; 24950Sstevel@tonic-gate } 24960Sstevel@tonic-gate 24970Sstevel@tonic-gate #ifdef DEBUG 24980Sstevel@tonic-gate /* 24990Sstevel@tonic-gate * This is a debug tool for excluding certain boards 25000Sstevel@tonic-gate * from being selected as a target board candidate. 
25010Sstevel@tonic-gate * dr_ignore_board is only tested by this driver. 25020Sstevel@tonic-gate * It must be set with adb, obp, /etc/system or your 25030Sstevel@tonic-gate * favorite debugger. 25040Sstevel@tonic-gate */ 25050Sstevel@tonic-gate if (dr_ignore_board & 25060Sstevel@tonic-gate (1 << (t_mp->sbm_cm.sbdev_bp->b_num - 1))) { 25070Sstevel@tonic-gate PR_MEM("%s: dr_ignore_board flag set," 25080Sstevel@tonic-gate " ignoring %s as candidate\n", 25090Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 25100Sstevel@tonic-gate continue; 25110Sstevel@tonic-gate } 25120Sstevel@tonic-gate #endif 25130Sstevel@tonic-gate 25140Sstevel@tonic-gate /* 25150Sstevel@tonic-gate * Reserve excess source board memory, if any. 25160Sstevel@tonic-gate * 25170Sstevel@tonic-gate * When the number of pages on the candidate target 25180Sstevel@tonic-gate * board is less than the number of pages on the source, 25190Sstevel@tonic-gate * then some spans (clearly) of the source board's address 25200Sstevel@tonic-gate * space will not be covered by physical memory after the 25210Sstevel@tonic-gate * copy/rename completes. The following code block 25220Sstevel@tonic-gate * schedules those spans to be deleted. 
25230Sstevel@tonic-gate */ 25240Sstevel@tonic-gate if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) { 25250Sstevel@tonic-gate pfn_t pfn; 25260Sstevel@tonic-gate uint64_t s_del_pa; 25270Sstevel@tonic-gate struct memlist *ml; 25280Sstevel@tonic-gate 25290Sstevel@tonic-gate d_ml = memlist_dup(s_ml); 25300Sstevel@tonic-gate if (d_ml == NULL) { 25310Sstevel@tonic-gate PR_MEM("%s: cant dup src brd memlist\n", f); 25320Sstevel@tonic-gate /* TODO: should abort */ 25330Sstevel@tonic-gate continue; 25340Sstevel@tonic-gate } 25350Sstevel@tonic-gate 25360Sstevel@tonic-gate /* calculate base pfn relative to target board */ 25370Sstevel@tonic-gate pfn = s_mp->sbm_basepfn & ~sm; 25380Sstevel@tonic-gate pfn += t_mp->sbm_slice_offset; 25390Sstevel@tonic-gate 25400Sstevel@tonic-gate /* 25410Sstevel@tonic-gate * cannot split dynamically added segment 25420Sstevel@tonic-gate */ 25430Sstevel@tonic-gate s_del_pa = _ptob64(pfn + t_mp->sbm_npages); 25440Sstevel@tonic-gate PR_MEM("%s: proposed src delete pa=0x%lx\n", f, 25450Sstevel@tonic-gate s_del_pa); 25460Sstevel@tonic-gate PR_MEM("%s: checking for split of dyn seg list:\n", f); 25470Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_dyn_segs); 25480Sstevel@tonic-gate for (ml = s_mp->sbm_dyn_segs; ml; ml = ml->next) { 25490Sstevel@tonic-gate if (s_del_pa > ml->address && 25500Sstevel@tonic-gate s_del_pa < ml->address + ml->size) { 25510Sstevel@tonic-gate s_del_pa = ml->address; 25520Sstevel@tonic-gate break; 25530Sstevel@tonic-gate } 25540Sstevel@tonic-gate } 25550Sstevel@tonic-gate 25560Sstevel@tonic-gate /* remove span that will reside on candidate board */ 25570Sstevel@tonic-gate d_ml = memlist_del_span(d_ml, _ptob64(pfn), 25580Sstevel@tonic-gate s_del_pa - _ptob64(pfn)); 25590Sstevel@tonic-gate 25600Sstevel@tonic-gate PR_MEM("%s: %s: reserving src brd memlist:\n", 25610Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 25620Sstevel@tonic-gate PR_MEMLIST_DUMP(d_ml); 25630Sstevel@tonic-gate 25640Sstevel@tonic-gate /* reserve excess 
spans */ 25650Sstevel@tonic-gate if (dr_reserve_mem_spans( 25660Sstevel@tonic-gate &s_mp->sbm_memhandle, d_ml) != 0) { 25670Sstevel@tonic-gate 25680Sstevel@tonic-gate /* likely more non-reloc pages appeared */ 25690Sstevel@tonic-gate /* TODO: restart from top? */ 25700Sstevel@tonic-gate continue; 25710Sstevel@tonic-gate } 25720Sstevel@tonic-gate } else { 25730Sstevel@tonic-gate /* no excess source board memory */ 25740Sstevel@tonic-gate d_ml = NULL; 25750Sstevel@tonic-gate } 25760Sstevel@tonic-gate 25770Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_RESERVED; 25780Sstevel@tonic-gate 25790Sstevel@tonic-gate /* 25800Sstevel@tonic-gate * reserve all memory on target board. 25810Sstevel@tonic-gate * NOTE: source board's memhandle is used. 25820Sstevel@tonic-gate * 25830Sstevel@tonic-gate * If this succeeds (eq 0), then target selection is 25840Sstevel@tonic-gate * complete and all unwanted memory spans, both source and 25850Sstevel@tonic-gate * target, have been reserved. Loop is terminated. 25860Sstevel@tonic-gate */ 25870Sstevel@tonic-gate if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, t_ml) == 0) { 25880Sstevel@tonic-gate PR_MEM("%s: %s: target board memory reserved\n", 25890Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 25900Sstevel@tonic-gate 25910Sstevel@tonic-gate /* a candidate target board is now reserved */ 25920Sstevel@tonic-gate t_mp->sbm_flags |= DR_MFLAG_RESERVED; 25930Sstevel@tonic-gate c_mp = t_mp; 25940Sstevel@tonic-gate 25950Sstevel@tonic-gate /* *** EXITING LOOP *** */ 25960Sstevel@tonic-gate break; 25970Sstevel@tonic-gate } 25980Sstevel@tonic-gate 25990Sstevel@tonic-gate /* did not successfully reserve the target board. 
*/ 26000Sstevel@tonic-gate PR_MEM("%s: could not reserve target %s\n", 26010Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 26020Sstevel@tonic-gate 26030Sstevel@tonic-gate /* 26040Sstevel@tonic-gate * NOTE: an undo of the dr_reserve_mem_span work 26050Sstevel@tonic-gate * will happen automatically when the memhandle 26060Sstevel@tonic-gate * (s_mp->sbm_memhandle) is kphysm_del_release'd. 26070Sstevel@tonic-gate */ 26080Sstevel@tonic-gate 26090Sstevel@tonic-gate s_mp->sbm_flags &= ~DR_MFLAG_RESERVED; 26100Sstevel@tonic-gate } 26110Sstevel@tonic-gate 26120Sstevel@tonic-gate /* clean up after memlist editing logic */ 26130Sstevel@tonic-gate if (x_ml != NULL) 26140Sstevel@tonic-gate memlist_delete(x_ml); 26150Sstevel@tonic-gate 26160Sstevel@tonic-gate FREESTRUCT(sets, dr_mem_unit_t *, n_units_per_set * n_sets); 26170Sstevel@tonic-gate 26180Sstevel@tonic-gate /* 26190Sstevel@tonic-gate * c_mp will be NULL when the entire sets[] array 26200Sstevel@tonic-gate * has been searched without reserving a target board. 
26210Sstevel@tonic-gate */ 26220Sstevel@tonic-gate if (c_mp == NULL) { 26230Sstevel@tonic-gate PR_MEM("%s: %s: target selection failed.\n", 26240Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 26250Sstevel@tonic-gate 26260Sstevel@tonic-gate if (t_ml != NULL) 26270Sstevel@tonic-gate memlist_delete(t_ml); 26280Sstevel@tonic-gate 26290Sstevel@tonic-gate return (-1); 26300Sstevel@tonic-gate } 26310Sstevel@tonic-gate 26320Sstevel@tonic-gate PR_MEM("%s: found target %s for source %s\n", 26330Sstevel@tonic-gate f, 26340Sstevel@tonic-gate c_mp->sbm_cm.sbdev_path, 26350Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path); 26360Sstevel@tonic-gate 26370Sstevel@tonic-gate s_mp->sbm_peer = c_mp; 26380Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_SOURCE; 26390Sstevel@tonic-gate s_mp->sbm_del_mlist = d_ml; /* spans to be deleted, if any */ 26400Sstevel@tonic-gate s_mp->sbm_mlist = s_ml; 26410Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 1; 26420Sstevel@tonic-gate 26430Sstevel@tonic-gate c_mp->sbm_peer = s_mp; 26440Sstevel@tonic-gate c_mp->sbm_flags |= DR_MFLAG_TARGET; 26450Sstevel@tonic-gate c_mp->sbm_del_mlist = t_ml; /* spans to be deleted */ 26460Sstevel@tonic-gate c_mp->sbm_mlist = t_ml; 26470Sstevel@tonic-gate c_mp->sbm_cm.sbdev_busy = 1; 26480Sstevel@tonic-gate 26490Sstevel@tonic-gate s_mp->sbm_flags &= ~DR_MFLAG_MEMRESIZE; 26500Sstevel@tonic-gate if (c_mp->sbm_npages > s_mp->sbm_npages) { 26510Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_MEMUPSIZE; 2652930Smathue PR_MEM("%s: upsize detected (source=%ld < target=%ld)\n", 26530Sstevel@tonic-gate f, s_mp->sbm_npages, c_mp->sbm_npages); 26540Sstevel@tonic-gate } else if (c_mp->sbm_npages < s_mp->sbm_npages) { 26550Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_MEMDOWNSIZE; 2656930Smathue PR_MEM("%s: downsize detected (source=%ld > target=%ld)\n", 26570Sstevel@tonic-gate f, s_mp->sbm_npages, c_mp->sbm_npages); 26580Sstevel@tonic-gate } 26590Sstevel@tonic-gate 26600Sstevel@tonic-gate return (0); 26610Sstevel@tonic-gate } 
26620Sstevel@tonic-gate 26630Sstevel@tonic-gate /* 26640Sstevel@tonic-gate * Memlist support. 26650Sstevel@tonic-gate */ 26660Sstevel@tonic-gate 26670Sstevel@tonic-gate /* 26680Sstevel@tonic-gate * Determine whether the source memlist (s_mlist) will 26690Sstevel@tonic-gate * fit into the target memlist (t_mlist) in terms of 26700Sstevel@tonic-gate * size and holes (i.e. based on same relative base address). 26710Sstevel@tonic-gate */ 26720Sstevel@tonic-gate static int 26730Sstevel@tonic-gate memlist_canfit(struct memlist *s_mlist, struct memlist *t_mlist) 26740Sstevel@tonic-gate { 26750Sstevel@tonic-gate int rv = 0; 26760Sstevel@tonic-gate uint64_t s_basepa, t_basepa; 26770Sstevel@tonic-gate struct memlist *s_ml, *t_ml; 26780Sstevel@tonic-gate 26790Sstevel@tonic-gate if ((s_mlist == NULL) || (t_mlist == NULL)) 26800Sstevel@tonic-gate return (0); 26810Sstevel@tonic-gate 26820Sstevel@tonic-gate /* 26830Sstevel@tonic-gate * Base both memlists on common base address (0). 26840Sstevel@tonic-gate */ 26850Sstevel@tonic-gate s_basepa = s_mlist->address; 26860Sstevel@tonic-gate t_basepa = t_mlist->address; 26870Sstevel@tonic-gate 26880Sstevel@tonic-gate for (s_ml = s_mlist; s_ml; s_ml = s_ml->next) 26890Sstevel@tonic-gate s_ml->address -= s_basepa; 26900Sstevel@tonic-gate 26910Sstevel@tonic-gate for (t_ml = t_mlist; t_ml; t_ml = t_ml->next) 26920Sstevel@tonic-gate t_ml->address -= t_basepa; 26930Sstevel@tonic-gate 26940Sstevel@tonic-gate s_ml = s_mlist; 26950Sstevel@tonic-gate for (t_ml = t_mlist; t_ml && s_ml; t_ml = t_ml->next) { 26960Sstevel@tonic-gate uint64_t s_start, s_end; 26970Sstevel@tonic-gate uint64_t t_start, t_end; 26980Sstevel@tonic-gate 26990Sstevel@tonic-gate t_start = t_ml->address; 27000Sstevel@tonic-gate t_end = t_start + t_ml->size; 27010Sstevel@tonic-gate 27020Sstevel@tonic-gate for (; s_ml; s_ml = s_ml->next) { 27030Sstevel@tonic-gate s_start = s_ml->address; 27040Sstevel@tonic-gate s_end = s_start + s_ml->size; 27050Sstevel@tonic-gate 
27060Sstevel@tonic-gate if ((s_start < t_start) || (s_end > t_end)) 27070Sstevel@tonic-gate break; 27080Sstevel@tonic-gate } 27090Sstevel@tonic-gate } 27100Sstevel@tonic-gate /* 27110Sstevel@tonic-gate * If we ran out of source memlist chunks that mean 27120Sstevel@tonic-gate * we found a home for all of them. 27130Sstevel@tonic-gate */ 27140Sstevel@tonic-gate if (s_ml == NULL) 27150Sstevel@tonic-gate rv = 1; 27160Sstevel@tonic-gate 27170Sstevel@tonic-gate /* 27180Sstevel@tonic-gate * Need to add base addresses back since memlists 27190Sstevel@tonic-gate * are probably in use by caller. 27200Sstevel@tonic-gate */ 27210Sstevel@tonic-gate for (s_ml = s_mlist; s_ml; s_ml = s_ml->next) 27220Sstevel@tonic-gate s_ml->address += s_basepa; 27230Sstevel@tonic-gate 27240Sstevel@tonic-gate for (t_ml = t_mlist; t_ml; t_ml = t_ml->next) 27250Sstevel@tonic-gate t_ml->address += t_basepa; 27260Sstevel@tonic-gate 27270Sstevel@tonic-gate return (rv); 27280Sstevel@tonic-gate } 2729